linux/mm/vmalloc.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  Copyright (C) 1993  Linus Torvalds
   4 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
   5 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
   6 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
   7 *  Numa awareness, Christoph Lameter, SGI, June 2005
   8 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
   9 */
  10
  11#include <linux/vmalloc.h>
  12#include <linux/mm.h>
  13#include <linux/module.h>
  14#include <linux/highmem.h>
  15#include <linux/sched/signal.h>
  16#include <linux/slab.h>
  17#include <linux/spinlock.h>
  18#include <linux/interrupt.h>
  19#include <linux/proc_fs.h>
  20#include <linux/seq_file.h>
  21#include <linux/set_memory.h>
  22#include <linux/debugobjects.h>
  23#include <linux/kallsyms.h>
  24#include <linux/list.h>
  25#include <linux/notifier.h>
  26#include <linux/rbtree.h>
  27#include <linux/xarray.h>
  28#include <linux/io.h>
  29#include <linux/rcupdate.h>
  30#include <linux/pfn.h>
  31#include <linux/kmemleak.h>
  32#include <linux/atomic.h>
  33#include <linux/compiler.h>
  34#include <linux/llist.h>
  35#include <linux/bitops.h>
  36#include <linux/rbtree_augmented.h>
  37#include <linux/overflow.h>
  38#include <linux/pgtable.h>
  39#include <linux/uaccess.h>
  40#include <linux/hugetlb.h>
  41#include <asm/tlbflush.h>
  42#include <asm/shmparam.h>
  43
  44#include "internal.h"
  45#include "pgalloc-track.h"
  46
  47#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
  48static bool __ro_after_init vmap_allow_huge = true;
  49
  50static int __init set_nohugevmalloc(char *str)
  51{
  52        vmap_allow_huge = false;
  53        return 0;
  54}
  55early_param("nohugevmalloc", set_nohugevmalloc);
  56#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
  57static const bool vmap_allow_huge = false;
  58#endif  /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
  59
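    /*
     * Check whether an address lies within the [VMALLOC_START, VMALLOC_END)
     * range. Note that this only tests the address range; it does not say
     * whether the address is actually backed by a mapping.
     */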
  60bool is_vmalloc_addr(const void *x)
  61{
  62        unsigned long addr = (unsigned long)x;
  63
  64        return addr >= VMALLOC_START && addr < VMALLOC_END;
  65}
  66EXPORT_SYMBOL(is_vmalloc_addr);
  67
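    /*
     * Deferred vfree machinery: callers that cannot perform the unmap
     * directly (e.g. vfree() from interrupt context) queue the address on
     * this per-CPU llist; free_work() later drains the list from a
     * workqueue and calls __vunmap() on each entry.
     */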
  68struct vfree_deferred {
  69        struct llist_head list;
  70        struct work_struct wq;
  71};
  72static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
  73
  74static void __vunmap(const void *, int);
  75
  76static void free_work(struct work_struct *w)
  77{
  78        struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
  79        struct llist_node *t, *llnode;
  80
  81        llist_for_each_safe(llnode, t, llist_del_all(&p->list))
  82                __vunmap((void *)llnode, 1);
  83}
  84
  85/*** Page table manipulation functions ***/
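    /*
     * Map the range [addr, end) at PTE level to the physically contiguous
     * region starting at @phys_addr. With CONFIG_HUGETLB_PAGE, the
     * architecture may map larger contiguous chunks with a single huge
     * PTE via arch_vmap_pte_range_map_size().
     */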
  86static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
  87                        phys_addr_t phys_addr, pgprot_t prot,
  88                        unsigned int max_page_shift, pgtbl_mod_mask *mask)
  89{
  90        pte_t *pte;
  91        u64 pfn;
  92        unsigned long size = PAGE_SIZE;
  93
  94        pfn = phys_addr >> PAGE_SHIFT;
  95        pte = pte_alloc_kernel_track(pmd, addr, mask);
  96        if (!pte)
  97                return -ENOMEM;
  98        do {
  99                BUG_ON(!pte_none(*pte));
 100
 101#ifdef CONFIG_HUGETLB_PAGE
 102                size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
 103                if (size != PAGE_SIZE) {
 104                        pte_t entry = pfn_pte(pfn, prot);
 105
 106                        entry = pte_mkhuge(entry);
 107                        entry = arch_make_huge_pte(entry, ilog2(size), 0);
 108                        set_huge_pte_at(&init_mm, addr, pte, entry);
 109                        pfn += PFN_DOWN(size);
 110                        continue;
 111                }
 112#endif
 113                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
 114                pfn++;
 115        } while (pte += PFN_DOWN(size), addr += size, addr != end);
 116        *mask |= PGTBL_PTE_MODIFIED;
 117        return 0;
 118}
 119
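    /*
     * Try to install a single PMD-level huge mapping for [addr, end).
     * This only succeeds if the range covers exactly one PMD, both the
     * virtual and physical addresses are PMD-aligned, and the architecture
     * supports PMD-level vmap mappings; otherwise 0 is returned and the
     * caller falls back to PTE mappings. The PUD and P4D helpers below
     * follow the same pattern at their respective levels.
     */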
 120static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
 121                        phys_addr_t phys_addr, pgprot_t prot,
 122                        unsigned int max_page_shift)
 123{
 124        if (max_page_shift < PMD_SHIFT)
 125                return 0;
 126
 127        if (!arch_vmap_pmd_supported(prot))
 128                return 0;
 129
 130        if ((end - addr) != PMD_SIZE)
 131                return 0;
 132
 133        if (!IS_ALIGNED(addr, PMD_SIZE))
 134                return 0;
 135
 136        if (!IS_ALIGNED(phys_addr, PMD_SIZE))
 137                return 0;
 138
 139        if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
 140                return 0;
 141
 142        return pmd_set_huge(pmd, phys_addr, prot);
 143}
 144
 145static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 146                        phys_addr_t phys_addr, pgprot_t prot,
 147                        unsigned int max_page_shift, pgtbl_mod_mask *mask)
 148{
 149        pmd_t *pmd;
 150        unsigned long next;
 151
 152        pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
 153        if (!pmd)
 154                return -ENOMEM;
 155        do {
 156                next = pmd_addr_end(addr, end);
 157
 158                if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
 159                                        max_page_shift)) {
 160                        *mask |= PGTBL_PMD_MODIFIED;
 161                        continue;
 162                }
 163
 164                if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
 165                        return -ENOMEM;
 166        } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
 167        return 0;
 168}
 169
 170static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
 171                        phys_addr_t phys_addr, pgprot_t prot,
 172                        unsigned int max_page_shift)
 173{
 174        if (max_page_shift < PUD_SHIFT)
 175                return 0;
 176
 177        if (!arch_vmap_pud_supported(prot))
 178                return 0;
 179
 180        if ((end - addr) != PUD_SIZE)
 181                return 0;
 182
 183        if (!IS_ALIGNED(addr, PUD_SIZE))
 184                return 0;
 185
 186        if (!IS_ALIGNED(phys_addr, PUD_SIZE))
 187                return 0;
 188
 189        if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
 190                return 0;
 191
 192        return pud_set_huge(pud, phys_addr, prot);
 193}
 194
 195static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 196                        phys_addr_t phys_addr, pgprot_t prot,
 197                        unsigned int max_page_shift, pgtbl_mod_mask *mask)
 198{
 199        pud_t *pud;
 200        unsigned long next;
 201
 202        pud = pud_alloc_track(&init_mm, p4d, addr, mask);
 203        if (!pud)
 204                return -ENOMEM;
 205        do {
 206                next = pud_addr_end(addr, end);
 207
 208                if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
 209                                        max_page_shift)) {
 210                        *mask |= PGTBL_PUD_MODIFIED;
 211                        continue;
 212                }
 213
 214                if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
 215                                        max_page_shift, mask))
 216                        return -ENOMEM;
 217        } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
 218        return 0;
 219}
 220
 221static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
 222                        phys_addr_t phys_addr, pgprot_t prot,
 223                        unsigned int max_page_shift)
 224{
 225        if (max_page_shift < P4D_SHIFT)
 226                return 0;
 227
 228        if (!arch_vmap_p4d_supported(prot))
 229                return 0;
 230
 231        if ((end - addr) != P4D_SIZE)
 232                return 0;
 233
 234        if (!IS_ALIGNED(addr, P4D_SIZE))
 235                return 0;
 236
 237        if (!IS_ALIGNED(phys_addr, P4D_SIZE))
 238                return 0;
 239
 240        if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
 241                return 0;
 242
 243        return p4d_set_huge(p4d, phys_addr, prot);
 244}
 245
 246static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 247                        phys_addr_t phys_addr, pgprot_t prot,
 248                        unsigned int max_page_shift, pgtbl_mod_mask *mask)
 249{
 250        p4d_t *p4d;
 251        unsigned long next;
 252
 253        p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
 254        if (!p4d)
 255                return -ENOMEM;
 256        do {
 257                next = p4d_addr_end(addr, end);
 258
 259                if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
 260                                        max_page_shift)) {
 261                        *mask |= PGTBL_P4D_MODIFIED;
 262                        continue;
 263                }
 264
 265                if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
 266                                        max_page_shift, mask))
 267                        return -ENOMEM;
 268        } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
 269        return 0;
 270}
 271
 272static int vmap_range_noflush(unsigned long addr, unsigned long end,
 273                        phys_addr_t phys_addr, pgprot_t prot,
 274                        unsigned int max_page_shift)
 275{
 276        pgd_t *pgd;
 277        unsigned long start;
 278        unsigned long next;
 279        int err;
 280        pgtbl_mod_mask mask = 0;
 281
 282        might_sleep();
 283        BUG_ON(addr >= end);
 284
 285        start = addr;
 286        pgd = pgd_offset_k(addr);
 287        do {
 288                next = pgd_addr_end(addr, end);
 289                err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
 290                                        max_page_shift, &mask);
 291                if (err)
 292                        break;
 293        } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
 294
 295        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 296                arch_sync_kernel_mappings(start, end);
 297
 298        return err;
 299}
 300
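    /*
     * vmap_range - map a physically contiguous range into kernel virtual
     * space, using mappings up to @max_page_shift in size where possible,
     * then flush the cache for the newly mapped range.
     */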
 301int vmap_range(unsigned long addr, unsigned long end,
 302                        phys_addr_t phys_addr, pgprot_t prot,
 303                        unsigned int max_page_shift)
 304{
 305        int err;
 306
 307        err = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
 308        flush_cache_vmap(addr, end);
 309
 310        return err;
 311}
 312
 313static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 314                             pgtbl_mod_mask *mask)
 315{
 316        pte_t *pte;
 317
 318        pte = pte_offset_kernel(pmd, addr);
 319        do {
 320                pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
 321                WARN_ON(!pte_none(ptent) && !pte_present(ptent));
 322        } while (pte++, addr += PAGE_SIZE, addr != end);
 323        *mask |= PGTBL_PTE_MODIFIED;
 324}
 325
 326static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 327                             pgtbl_mod_mask *mask)
 328{
 329        pmd_t *pmd;
 330        unsigned long next;
 331        int cleared;
 332
 333        pmd = pmd_offset(pud, addr);
 334        do {
 335                next = pmd_addr_end(addr, end);
 336
 337                cleared = pmd_clear_huge(pmd);
 338                if (cleared || pmd_bad(*pmd))
 339                        *mask |= PGTBL_PMD_MODIFIED;
 340
 341                if (cleared)
 342                        continue;
 343                if (pmd_none_or_clear_bad(pmd))
 344                        continue;
 345                vunmap_pte_range(pmd, addr, next, mask);
 346
 347                cond_resched();
 348        } while (pmd++, addr = next, addr != end);
 349}
 350
 351static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 352                             pgtbl_mod_mask *mask)
 353{
 354        pud_t *pud;
 355        unsigned long next;
 356        int cleared;
 357
 358        pud = pud_offset(p4d, addr);
 359        do {
 360                next = pud_addr_end(addr, end);
 361
 362                cleared = pud_clear_huge(pud);
 363                if (cleared || pud_bad(*pud))
 364                        *mask |= PGTBL_PUD_MODIFIED;
 365
 366                if (cleared)
 367                        continue;
 368                if (pud_none_or_clear_bad(pud))
 369                        continue;
 370                vunmap_pmd_range(pud, addr, next, mask);
 371        } while (pud++, addr = next, addr != end);
 372}
 373
 374static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 375                             pgtbl_mod_mask *mask)
 376{
 377        p4d_t *p4d;
 378        unsigned long next;
 379        int cleared;
 380
 381        p4d = p4d_offset(pgd, addr);
 382        do {
 383                next = p4d_addr_end(addr, end);
 384
 385                cleared = p4d_clear_huge(p4d);
 386                if (cleared || p4d_bad(*p4d))
 387                        *mask |= PGTBL_P4D_MODIFIED;
 388
 389                if (cleared)
 390                        continue;
 391                if (p4d_none_or_clear_bad(p4d))
 392                        continue;
 393                vunmap_pud_range(p4d, addr, next, mask);
 394        } while (p4d++, addr = next, addr != end);
 395}
 396
 397/*
 398 * vunmap_range_noflush is similar to vunmap_range, but does not
 399 * flush caches or TLBs.
 400 *
 401 * The caller is responsible for calling flush_cache_vunmap() before calling
 402 * this function, and flush_tlb_kernel_range() after it has returned
 403 * successfully (and before the addresses are expected to cause a page fault
 404 * or be re-mapped for something else, if TLB flushes are being delayed or
 405 * coalesced).
 406 *
 407 * This is an internal function only. Do not use outside mm/.
 408 */
 409void vunmap_range_noflush(unsigned long start, unsigned long end)
 410{
 411        unsigned long next;
 412        pgd_t *pgd;
 413        unsigned long addr = start;
 414        pgtbl_mod_mask mask = 0;
 415
 416        BUG_ON(addr >= end);
 417        pgd = pgd_offset_k(addr);
 418        do {
 419                next = pgd_addr_end(addr, end);
 420                if (pgd_bad(*pgd))
 421                        mask |= PGTBL_PGD_MODIFIED;
 422                if (pgd_none_or_clear_bad(pgd))
 423                        continue;
 424                vunmap_p4d_range(pgd, addr, next, &mask);
 425        } while (pgd++, addr = next, addr != end);
 426
 427        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 428                arch_sync_kernel_mappings(start, end);
 429}
 430
 431/**
 432 * vunmap_range - unmap kernel virtual addresses
 433 * @addr: start of the VM area to unmap
 434 * @end: end of the VM area to unmap (non-inclusive)
 435 *
 436 * Clears any present PTEs in the virtual address range, flushes TLBs and
 437 * caches. Any subsequent access to the address before it has been re-mapped
 438 * is a kernel bug.
 439 */
 440void vunmap_range(unsigned long addr, unsigned long end)
 441{
 442        flush_cache_vunmap(addr, end);
 443        vunmap_range_noflush(addr, end);
 444        flush_tlb_kernel_range(addr, end);
 445}
 446
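    /*
     * Install one PTE per page from the @pages array into [addr, end),
     * using the @prot protection bits. Fails with -EBUSY if a PTE is
     * already present and with -ENOMEM if a page pointer is missing.
     */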
 447static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 448                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 449                pgtbl_mod_mask *mask)
 450{
 451        pte_t *pte;
 452
 453        /*
 454         * nr is a running index into the array which helps higher level
 455         * callers keep track of where we're up to.
 456         */
 457
 458        pte = pte_alloc_kernel_track(pmd, addr, mask);
 459        if (!pte)
 460                return -ENOMEM;
 461        do {
 462                struct page *page = pages[*nr];
 463
 464                if (WARN_ON(!pte_none(*pte)))
 465                        return -EBUSY;
 466                if (WARN_ON(!page))
 467                        return -ENOMEM;
 468                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 469                (*nr)++;
 470        } while (pte++, addr += PAGE_SIZE, addr != end);
 471        *mask |= PGTBL_PTE_MODIFIED;
 472        return 0;
 473}
 474
 475static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
 476                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 477                pgtbl_mod_mask *mask)
 478{
 479        pmd_t *pmd;
 480        unsigned long next;
 481
 482        pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
 483        if (!pmd)
 484                return -ENOMEM;
 485        do {
 486                next = pmd_addr_end(addr, end);
 487                if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
 488                        return -ENOMEM;
 489        } while (pmd++, addr = next, addr != end);
 490        return 0;
 491}
 492
 493static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
 494                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 495                pgtbl_mod_mask *mask)
 496{
 497        pud_t *pud;
 498        unsigned long next;
 499
 500        pud = pud_alloc_track(&init_mm, p4d, addr, mask);
 501        if (!pud)
 502                return -ENOMEM;
 503        do {
 504                next = pud_addr_end(addr, end);
 505                if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
 506                        return -ENOMEM;
 507        } while (pud++, addr = next, addr != end);
 508        return 0;
 509}
 510
 511static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
 512                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 513                pgtbl_mod_mask *mask)
 514{
 515        p4d_t *p4d;
 516        unsigned long next;
 517
 518        p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
 519        if (!p4d)
 520                return -ENOMEM;
 521        do {
 522                next = p4d_addr_end(addr, end);
 523                if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
 524                        return -ENOMEM;
 525        } while (p4d++, addr = next, addr != end);
 526        return 0;
 527}
 528
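    /*
     * Map @pages into [addr, end) one PAGE_SIZE page at a time. This is
     * the path taken by vmap_pages_range_noflush() when huge vmalloc
     * mappings are not available or the requested page_shift is
     * PAGE_SHIFT.
     */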
 529static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
 530                pgprot_t prot, struct page **pages)
 531{
 532        unsigned long start = addr;
 533        pgd_t *pgd;
 534        unsigned long next;
 535        int err = 0;
 536        int nr = 0;
 537        pgtbl_mod_mask mask = 0;
 538
 539        BUG_ON(addr >= end);
 540        pgd = pgd_offset_k(addr);
 541        do {
 542                next = pgd_addr_end(addr, end);
 543                if (pgd_bad(*pgd))
 544                        mask |= PGTBL_PGD_MODIFIED;
 545                err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
 546                if (err)
 547                        return err;
 548        } while (pgd++, addr = next, addr != end);
 549
 550        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 551                arch_sync_kernel_mappings(start, end);
 552
 553        return 0;
 554}
 555
 556/*
 557 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 558 * flush caches.
 559 *
 560 * The caller is responsible for calling flush_cache_vmap() after this
 561 * function returns successfully and before the addresses are accessed.
 562 *
 563 * This is an internal function only. Do not use outside mm/.
 564 */
 565int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 566                pgprot_t prot, struct page **pages, unsigned int page_shift)
 567{
 568        unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
 569
 570        WARN_ON(page_shift < PAGE_SHIFT);
 571
 572        if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
 573                        page_shift == PAGE_SHIFT)
 574                return vmap_small_pages_range_noflush(addr, end, prot, pages);
 575
 576        for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
 577                int err;
 578
 579                err = vmap_range_noflush(addr, addr + (1UL << page_shift),
 580                                        __pa(page_address(pages[i])), prot,
 581                                        page_shift);
 582                if (err)
 583                        return err;
 584
 585                addr += 1UL << page_shift;
 586        }
 587
 588        return 0;
 589}
 590
 591/**
 592 * vmap_pages_range - map pages to a kernel virtual address
 593 * @addr: start of the VM area to map
 594 * @end: end of the VM area to map (non-inclusive)
 595 * @prot: page protection flags to use
 596 * @pages: pages to map (always PAGE_SIZE pages)
 597 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 598 * be aligned and contiguous up to at least this shift.
 599 *
 600 * RETURNS:
 601 * 0 on success, -errno on failure.
 602 */
 603static int vmap_pages_range(unsigned long addr, unsigned long end,
 604                pgprot_t prot, struct page **pages, unsigned int page_shift)
 605{
 606        int err;
 607
 608        err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
 609        flush_cache_vmap(addr, end);
 610        return err;
 611}
 612
 613int is_vmalloc_or_module_addr(const void *x)
 614{
 615        /*
 616         * ARM, x86-64 and sparc64 put modules in a special place,
 617         * and fall back on vmalloc() if that fails. Others
 618         * just put them in the vmalloc space.
 619         */
 620#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
 621        unsigned long addr = (unsigned long)x;
 622        if (addr >= MODULES_VADDR && addr < MODULES_END)
 623                return 1;
 624#endif
 625        return is_vmalloc_addr(x);
 626}
 627
 628/*
 629 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 630 * return the tail page that corresponds to the base page address, which
 631 * matches small vmap mappings.
 632 */
 633struct page *vmalloc_to_page(const void *vmalloc_addr)
 634{
 635        unsigned long addr = (unsigned long) vmalloc_addr;
 636        struct page *page = NULL;
 637        pgd_t *pgd = pgd_offset_k(addr);
 638        p4d_t *p4d;
 639        pud_t *pud;
 640        pmd_t *pmd;
 641        pte_t *ptep, pte;
 642
 643        /*
 644         * XXX we might need to change this if we add VIRTUAL_BUG_ON for
 645         * architectures that do not vmalloc module space
 646         */
 647        VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
 648
 649        if (pgd_none(*pgd))
 650                return NULL;
 651        if (WARN_ON_ONCE(pgd_leaf(*pgd)))
 652                return NULL; /* XXX: no allowance for huge pgd */
 653        if (WARN_ON_ONCE(pgd_bad(*pgd)))
 654                return NULL;
 655
 656        p4d = p4d_offset(pgd, addr);
 657        if (p4d_none(*p4d))
 658                return NULL;
 659        if (p4d_leaf(*p4d))
 660                return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
 661        if (WARN_ON_ONCE(p4d_bad(*p4d)))
 662                return NULL;
 663
 664        pud = pud_offset(p4d, addr);
 665        if (pud_none(*pud))
 666                return NULL;
 667        if (pud_leaf(*pud))
 668                return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 669        if (WARN_ON_ONCE(pud_bad(*pud)))
 670                return NULL;
 671
 672        pmd = pmd_offset(pud, addr);
 673        if (pmd_none(*pmd))
 674                return NULL;
 675        if (pmd_leaf(*pmd))
 676                return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 677        if (WARN_ON_ONCE(pmd_bad(*pmd)))
 678                return NULL;
 679
 680        ptep = pte_offset_map(pmd, addr);
 681        pte = *ptep;
 682        if (pte_present(pte))
 683                page = pte_page(pte);
 684        pte_unmap(ptep);
 685
 686        return page;
 687}
 688EXPORT_SYMBOL(vmalloc_to_page);
 689
 690/*
 691 * Map a vmalloc()-space virtual address to the physical page frame number.
 692 */
 693unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 694{
 695        return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 696}
 697EXPORT_SYMBOL(vmalloc_to_pfn);
 698
 700/*** Global kva allocator ***/
 701
 702#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
 703#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 704
 706static DEFINE_SPINLOCK(vmap_area_lock);
 707static DEFINE_SPINLOCK(free_vmap_area_lock);
 708/* Export for kexec only */
 709LIST_HEAD(vmap_area_list);
 710static struct rb_root vmap_area_root = RB_ROOT;
 711static bool vmap_initialized __read_mostly;
 712
 713static struct rb_root purge_vmap_area_root = RB_ROOT;
 714static LIST_HEAD(purge_vmap_area_list);
 715static DEFINE_SPINLOCK(purge_vmap_area_lock);
 716
 717/*
 718 * This kmem_cache is used for vmap_area objects. Instead of
 719 * allocating fresh objects from the slab each time, we reuse a
 720 * preloaded object from this cache to make things faster,
 721 * especially in the "no edge" split of a free block.
 722 */
 723static struct kmem_cache *vmap_area_cachep;
 724
 725/*
 726 * This linked list is used together with free_vmap_area_root.
 727 * It gives O(1) access to prev/next to perform fast coalescing.
 728 */
 729static LIST_HEAD(free_vmap_area_list);
 730
 731/*
 732 * This augmented red-black tree represents the free vmap space.
 733 * All vmap_area objects in this tree are sorted by va->va_start
 734 * address. It is used for allocation and merging when a vmap
 735 * object is released.
 736 *
 737 * Each vmap_area node also stores the maximum available free block
 738 * size of its sub-tree, left or right. This makes it possible to
 739 * find the lowest-address free area that matches a given request.
 740 */
 741static struct rb_root free_vmap_area_root = RB_ROOT;
 742
 743/*
 744 * Preload a CPU with one object for the "no edge" split case. The
 745 * aim is to avoid allocating from atomic context, so that more
 746 * permissive allocation masks can be used.
 747 */
 748static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
 749
 750static __always_inline unsigned long
 751va_size(struct vmap_area *va)
 752{
 753        return (va->va_end - va->va_start);
 754}
 755
 756static __always_inline unsigned long
 757get_subtree_max_size(struct rb_node *node)
 758{
 759        struct vmap_area *va;
 760
 761        va = rb_entry_safe(node, struct vmap_area, rb_node);
 762        return va ? va->subtree_max_size : 0;
 763}
 764
 765/*
 766 * Gets called when a node is removed from the tree or rotated.
 767 */
 768static __always_inline unsigned long
 769compute_subtree_max_size(struct vmap_area *va)
 770{
 771        return max3(va_size(va),
 772                get_subtree_max_size(va->rb_node.rb_left),
 773                get_subtree_max_size(va->rb_node.rb_right));
 774}
 775
 776RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
 777        struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
 778
 779static void purge_vmap_area_lazy(void);
 780static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
 781static unsigned long lazy_max_pages(void);
 782
 783static atomic_long_t nr_vmalloc_pages;
 784
 785unsigned long vmalloc_nr_pages(void)
 786{
 787        return atomic_long_read(&nr_vmalloc_pages);
 788}
 789
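    /*
     * Look up the busy vmap_area that contains @addr in the vmap_area_root
     * rb-tree. Returns NULL if no area covers the address. Callers are
     * expected to hold vmap_area_lock.
     */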
 790static struct vmap_area *__find_vmap_area(unsigned long addr)
 791{
 792        struct rb_node *n = vmap_area_root.rb_node;
 793
 794        while (n) {
 795                struct vmap_area *va;
 796
 797                va = rb_entry(n, struct vmap_area, rb_node);
 798                if (addr < va->va_start)
 799                        n = n->rb_left;
 800                else if (addr >= va->va_end)
 801                        n = n->rb_right;
 802                else
 803                        return va;
 804        }
 805
 806        return NULL;
 807}
 808
 809/*
 810 * This function returns the address of the parent node's left or
 811 * right link to which the new node should be attached, and stores
 812 * the parent itself via @parent.
 813 *
 814 * If the new range overlaps an existing one, NULL is returned; in
 815 * that case the insertion must be declined and treated as a bug.
 816 */
 817static __always_inline struct rb_node **
 818find_va_links(struct vmap_area *va,
 819        struct rb_root *root, struct rb_node *from,
 820        struct rb_node **parent)
 821{
 822        struct vmap_area *tmp_va;
 823        struct rb_node **link;
 824
 825        if (root) {
 826                link = &root->rb_node;
 827                if (unlikely(!*link)) {
 828                        *parent = NULL;
 829                        return link;
 830                }
 831        } else {
 832                link = &from;
 833        }
 834
 835         * Go to the bottom of the tree. When we hit the last point
 836         * we end up with the parent rb_node and the correct direction
 837         * ("link") to which the new va->rb_node will be attached.
 838         * it link, where the new va->rb_node will be attached to.
 839         */
 840        do {
 841                tmp_va = rb_entry(*link, struct vmap_area, rb_node);
 842
 843                /*
 844                 * During the traversal we also do some sanity checks.
 845                 * Issue a WARN() and bail out if the new range partially
 846                 * or fully overlaps an existing one.
 847                 */
 848                if (va->va_start < tmp_va->va_end &&
 849                                va->va_end <= tmp_va->va_start)
 850                        link = &(*link)->rb_left;
 851                else if (va->va_end > tmp_va->va_start &&
 852                                va->va_start >= tmp_va->va_end)
 853                        link = &(*link)->rb_right;
 854                else {
 855                        WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
 856                                va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
 857
 858                        return NULL;
 859                }
 860        } while (*link);
 861
 862        *parent = &tmp_va->rb_node;
 863        return link;
 864}
 865
 866static __always_inline struct list_head *
 867get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
 868{
 869        struct list_head *list;
 870
 871        if (unlikely(!parent))
 872                /*
 873                 * The red-black tree where we try to find VA neighbors
 874                 * before merging or inserting is empty, i.e. there is no
 875                 * free vmap space. Normally this does not happen, but we
 876                 * handle the case anyway.
 877                 */
 878                return NULL;
 879
 880        list = &rb_entry(parent, struct vmap_area, rb_node)->list;
 881        return (&parent->rb_right == link ? list->next : list);
 882}
 883
 884static __always_inline void
 885link_va(struct vmap_area *va, struct rb_root *root,
 886        struct rb_node *parent, struct rb_node **link, struct list_head *head)
 887{
 888        /*
 889         * VA is still not in the list, but we can
 890         * identify its future previous list_head node.
 891         */
 892        if (likely(parent)) {
 893                head = &rb_entry(parent, struct vmap_area, rb_node)->list;
 894                if (&parent->rb_right != link)
 895                        head = head->prev;
 896        }
 897
 898        /* Insert to the rb-tree */
 899        rb_link_node(&va->rb_node, parent, link);
 900        if (root == &free_vmap_area_root) {
 901                /*
 902                 * Just perform a simple insertion into the tree. We do not
 903                 * set va->subtree_max_size to its current size before calling
 904                 * rb_insert_augmented(), because the tree is populated from
 905                 * the bottom up towards parent levels once the node _is_ in
 906                 * the tree.
 907                 *
 908                 * Therefore we set subtree_max_size to zero after insertion,
 909                 * and let __augment_tree_propagate_from() put everything into
 910                 * the correct order later on.
 911                 */
 912                rb_insert_augmented(&va->rb_node,
 913                        root, &free_vmap_area_rb_augment_cb);
 914                va->subtree_max_size = 0;
 915        } else {
 916                rb_insert_color(&va->rb_node, root);
 917        }
 918
 919        /* Address-sort this list */
 920        list_add(&va->list, head);
 921}
 922
 923static __always_inline void
 924unlink_va(struct vmap_area *va, struct rb_root *root)
 925{
 926        if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
 927                return;
 928
 929        if (root == &free_vmap_area_root)
 930                rb_erase_augmented(&va->rb_node,
 931                        root, &free_vmap_area_rb_augment_cb);
 932        else
 933                rb_erase(&va->rb_node, root);
 934
 935        list_del(&va->list);
 936        RB_CLEAR_NODE(&va->rb_node);
 937}
 938
 939#if DEBUG_AUGMENT_PROPAGATE_CHECK
 940static void
 941augment_tree_propagate_check(void)
 942{
 943        struct vmap_area *va;
 944        unsigned long computed_size;
 945
 946        list_for_each_entry(va, &free_vmap_area_list, list) {
 947                computed_size = compute_subtree_max_size(va);
 948                if (computed_size != va->subtree_max_size)
 949                        pr_emerg("tree is corrupted: %lu, %lu\n",
 950                                va_size(va), va->subtree_max_size);
 951        }
 952}
 953#endif
 954
 955/*
 956 * This function populates subtree_max_size from the bottom towards
 957 * upper levels, starting from the given VA. The propagation must be
 958 * done when the VA size is modified by changing its va_start/va_end,
 959 * or when a new VA is inserted into the tree.
 960 *
 961 * It means that __augment_tree_propagate_from() must be called:
 962 * - after a VA has been inserted into the tree (free path);
 963 * - after a VA has been shrunk (allocation path);
 964 * - after a VA has been enlarged (merging path).
 965 *
 966 * Please note that this does not mean that upper parent nodes
 967 * and their subtree_max_size are recalculated all the way up
 968 * to the root node.
 969 *
 970 *       4--8
 971 *        /\
 972 *       /  \
 973 *      /    \
 974 *    2--2  8--8
 975 *
 976 * For example, if we shrink node 4 to 2, no modification is
 977 * required at all. If we shrink node 2 to 1, only its own
 978 * subtree_max_size is updated and set to 1. If we shrink node 8
 979 * to 6, its subtree_max_size is set to 6 and the parent node
 980 * becomes 4--6.
 981 */
 982static __always_inline void
 983augment_tree_propagate_from(struct vmap_area *va)
 984{
 985        /*
 986         * Populate the tree from bottom towards the root until
 987         * the calculated maximum available size of checked node
 988         * is equal to its current one.
 989         */
 990        free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
 991
 992#if DEBUG_AUGMENT_PROPAGATE_CHECK
 993        augment_tree_propagate_check();
 994#endif
 995}
 996
 997static void
 998insert_vmap_area(struct vmap_area *va,
 999        struct rb_root *root, struct list_head *head)
1000{
1001        struct rb_node **link;
1002        struct rb_node *parent;
1003
1004        link = find_va_links(va, root, NULL, &parent);
1005        if (link)
1006                link_va(va, root, parent, link, head);
1007}
1008
1009static void
1010insert_vmap_area_augment(struct vmap_area *va,
1011        struct rb_node *from, struct rb_root *root,
1012        struct list_head *head)
1013{
1014        struct rb_node **link;
1015        struct rb_node *parent;
1016
1017        if (from)
1018                link = find_va_links(va, NULL, from, &parent);
1019        else
1020                link = find_va_links(va, root, NULL, &parent);
1021
1022        if (link) {
1023                link_va(va, root, parent, link, head);
1024                augment_tree_propagate_from(va);
1025        }
1026}
1027
1028/*
1029 * Merge a de-allocated chunk of VA memory with the previous
1030 * and next free blocks. If no coalescing is done, a new free
1031 * area is inserted. If the VA has been merged, its vmap_area
1032 * object is freed.
1033 *
1034 * Please note, it can return NULL if the range overlaps an
1035 * existing one, in which case a WARN() is reported. Despite
1036 * being buggy behaviour, the system can stay alive and keep
1037 * going.
1038 */
1039static __always_inline struct vmap_area *
1040merge_or_add_vmap_area(struct vmap_area *va,
1041        struct rb_root *root, struct list_head *head)
1042{
1043        struct vmap_area *sibling;
1044        struct list_head *next;
1045        struct rb_node **link;
1046        struct rb_node *parent;
1047        bool merged = false;
1048
1049        /*
1050         * Find a place in the tree where VA potentially will be
1051         * inserted, unless it is merged with its sibling/siblings.
1052         */
1053        link = find_va_links(va, root, NULL, &parent);
1054        if (!link)
1055                return NULL;
1056
1057        /*
1058         * Get next node of VA to check if merging can be done.
1059         */
1060        next = get_va_next_sibling(parent, link);
1061        if (unlikely(next == NULL))
1062                goto insert;
1063
1064        /*
1065         * start            end
1066         * |                |
1067         * |<------VA------>|<-----Next----->|
1068         *                  |                |
1069         *                  start            end
1070         */
1071        if (next != head) {
1072                sibling = list_entry(next, struct vmap_area, list);
1073                if (sibling->va_start == va->va_end) {
1074                        sibling->va_start = va->va_start;
1075
1076                        /* Free vmap_area object. */
1077                        kmem_cache_free(vmap_area_cachep, va);
1078
1079                        /* Point to the new merged area. */
1080                        va = sibling;
1081                        merged = true;
1082                }
1083        }
1084
1085        /*
1086         * start            end
1087         * |                |
1088         * |<-----Prev----->|<------VA------>|
1089         *                  |                |
1090         *                  start            end
1091         */
1092        if (next->prev != head) {
1093                sibling = list_entry(next->prev, struct vmap_area, list);
1094                if (sibling->va_end == va->va_start) {
1095                        /*
1096                         * If both neighbors are coalesced, it is important
1097                         * to unlink the "next" node first, followed by merging
1098                         * with "previous" one. Otherwise the tree might not be
1099                         * fully populated if a sibling's augmented value is
1100                         * "normalized" because of rotation operations.
1101                         */
1102                        if (merged)
1103                                unlink_va(va, root);
1104
1105                        sibling->va_end = va->va_end;
1106
1107                        /* Free vmap_area object. */
1108                        kmem_cache_free(vmap_area_cachep, va);
1109
1110                        /* Point to the new merged area. */
1111                        va = sibling;
1112                        merged = true;
1113                }
1114        }
1115
1116insert:
1117        if (!merged)
1118                link_va(va, root, parent, link, head);
1119
1120        return va;
1121}
1122
1123static __always_inline struct vmap_area *
1124merge_or_add_vmap_area_augment(struct vmap_area *va,
1125        struct rb_root *root, struct list_head *head)
1126{
1127        va = merge_or_add_vmap_area(va, root, head);
1128        if (va)
1129                augment_tree_propagate_from(va);
1130
1131        return va;
1132}
1133
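    /*
     * Check whether an allocation of @size bytes, aligned to @align and
     * starting no lower than @vstart, fits completely inside the free
     * area @va.
     */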
1134static __always_inline bool
1135is_within_this_va(struct vmap_area *va, unsigned long size,
1136        unsigned long align, unsigned long vstart)
1137{
1138        unsigned long nva_start_addr;
1139
1140        if (va->va_start > vstart)
1141                nva_start_addr = ALIGN(va->va_start, align);
1142        else
1143                nva_start_addr = ALIGN(vstart, align);
1144
1145        /* Can be overflowed due to big size or alignment. */
1146        if (nva_start_addr + size < nva_start_addr ||
1147                        nva_start_addr < vstart)
1148                return false;
1149
1150        return (nva_start_addr + size <= va->va_end);
1151}
1152
1153/*
1154 * Find the first free block (the one with the lowest start
1155 * address) in the tree that can satisfy the request described
1156 * by the passed parameters.
1157 */
1158static __always_inline struct vmap_area *
1159find_vmap_lowest_match(unsigned long size,
1160        unsigned long align, unsigned long vstart)
1161{
1162        struct vmap_area *va;
1163        struct rb_node *node;
1164        unsigned long length;
1165
1166        /* Start from the root. */
1167        node = free_vmap_area_root.rb_node;
1168
1169        /* Adjust the search size for alignment overhead. */
1170        length = size + align - 1;
1171
1172        while (node) {
1173                va = rb_entry(node, struct vmap_area, rb_node);
1174
1175                if (get_subtree_max_size(node->rb_left) >= length &&
1176                                vstart < va->va_start) {
1177                        node = node->rb_left;
1178                } else {
1179                        if (is_within_this_va(va, size, align, vstart))
1180                                return va;
1181
1182                        /*
1183                         * It does not make sense to go deeper into the right
1184                         * sub-tree if it does not have a free block that is
1185                         * at least as big as the requested search length.
1186                         */
1187                        if (get_subtree_max_size(node->rb_right) >= length) {
1188                                node = node->rb_right;
1189                                continue;
1190                        }
1191
1192                        /*
1193                         * OK. We roll back and find the first right sub-tree
1194                         * that satisfies the search criteria. This can happen
1195                         * only once due to the "vstart" restriction.
1196                         */
1197                        while ((node = rb_parent(node))) {
1198                                va = rb_entry(node, struct vmap_area, rb_node);
1199                                if (is_within_this_va(va, size, align, vstart))
1200                                        return va;
1201
1202                                if (get_subtree_max_size(node->rb_right) >= length &&
1203                                                vstart <= va->va_start) {
1204                                        node = node->rb_right;
1205                                        break;
1206                                }
1207                        }
1208                }
1209        }
1210
1211        return NULL;
1212}
1213
1214#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1215#include <linux/random.h>
1216
1217static struct vmap_area *
1218find_vmap_lowest_linear_match(unsigned long size,
1219        unsigned long align, unsigned long vstart)
1220{
1221        struct vmap_area *va;
1222
1223        list_for_each_entry(va, &free_vmap_area_list, list) {
1224                if (!is_within_this_va(va, size, align, vstart))
1225                        continue;
1226
1227                return va;
1228        }
1229
1230        return NULL;
1231}
1232
1233static void
1234find_vmap_lowest_match_check(unsigned long size)
1235{
1236        struct vmap_area *va_1, *va_2;
1237        unsigned long vstart;
1238        unsigned int rnd;
1239
1240        get_random_bytes(&rnd, sizeof(rnd));
1241        vstart = VMALLOC_START + rnd;
1242
1243        va_1 = find_vmap_lowest_match(size, 1, vstart);
1244        va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
1245
1246        if (va_1 != va_2)
1247                pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1248                        va_1, va_2, vstart);
1249}
1250#endif
1251
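    /*
     * Describes how a new allocation [nva_start_addr, nva_start_addr + size)
     * fits into an existing free vmap_area: it may consume the whole area,
     * leave a remainder on one side, or split the area in two.
     */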
1252enum fit_type {
1253        NOTHING_FIT = 0,
1254        FL_FIT_TYPE = 1,        /* full fit */
1255        LE_FIT_TYPE = 2,        /* left edge fit */
1256        RE_FIT_TYPE = 3,        /* right edge fit */
1257        NE_FIT_TYPE = 4         /* no edge fit */
1258};
1259
1260static __always_inline enum fit_type
1261classify_va_fit_type(struct vmap_area *va,
1262        unsigned long nva_start_addr, unsigned long size)
1263{
1264        enum fit_type type;
1265
1266        /* Check if it is within VA. */
1267        if (nva_start_addr < va->va_start ||
1268                        nva_start_addr + size > va->va_end)
1269                return NOTHING_FIT;
1270
1271        /* Now classify. */
1272        if (va->va_start == nva_start_addr) {
1273                if (va->va_end == nva_start_addr + size)
1274                        type = FL_FIT_TYPE;
1275                else
1276                        type = LE_FIT_TYPE;
1277        } else if (va->va_end == nva_start_addr + size) {
1278                type = RE_FIT_TYPE;
1279        } else {
1280                type = NE_FIT_TYPE;
1281        }
1282
1283        return type;
1284}
1285
1286static __always_inline int
1287adjust_va_to_fit_type(struct vmap_area *va,
1288        unsigned long nva_start_addr, unsigned long size,
1289        enum fit_type type)
1290{
1291        struct vmap_area *lva = NULL;
1292
1293        if (type == FL_FIT_TYPE) {
1294                /*
1295                 * No need to split VA, it fully fits.
1296                 *
1297                 * |               |
1298                 * V      NVA      V
1299                 * |---------------|
1300                 */
1301                unlink_va(va, &free_vmap_area_root);
1302                kmem_cache_free(vmap_area_cachep, va);
1303        } else if (type == LE_FIT_TYPE) {
1304                /*
1305                 * Split left edge of fit VA.
1306                 *
1307                 * |       |
1308                 * V  NVA  V   R
1309                 * |-------|-------|
1310                 */
1311                va->va_start += size;
1312        } else if (type == RE_FIT_TYPE) {
1313                /*
1314                 * Split right edge of fit VA.
1315                 *
1316                 *         |       |
1317                 *     L   V  NVA  V
1318                 * |-------|-------|
1319                 */
1320                va->va_end = nva_start_addr;
1321        } else if (type == NE_FIT_TYPE) {
1322                /*
1323                 * Split no edge of fit VA.
1324                 *
1325                 *     |       |
1326                 *   L V  NVA  V R
1327                 * |---|-------|---|
1328                 */
1329                lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1330                if (unlikely(!lva)) {
1331                        /*
1332                         * For the percpu allocator we do not do any pre-allocation
1333                         * and leave it as it is. The reason is that it most likely
1334                         * never ends up with NE_FIT_TYPE splitting. For percpu
1335                         * allocations, offsets and sizes are aligned to a fixed
1336                         * alignment request, i.e. RE_FIT_TYPE and FL_FIT_TYPE are
1337                         * its main fitting cases.
1338                         *
1339                         * There are a few exceptions though; one example is the
1340                         * first allocation (early boot) when we have "one" big
1341                         * free space that has to be split.
1342                         *
1343                         * We can also hit this path for regular "vmap" allocations,
1344                         * if "this" current CPU was not preloaded. See the comment
1345                         * in alloc_vmap_area() for why. If so, GFP_NOWAIT is used
1346                         * instead to get an extra object for split purposes. That
1347                         * is rare and most of the time does not occur.
1348                         *
1349                         * What happens if the allocation fails? Basically, the
1350                         * "overflow" path is triggered to purge lazily-freed
1351                         * areas in order to free some memory, and then the
1352                         * "retry" path is taken to repeat the attempt one more
1353                         * time. See more details in the alloc_vmap_area()
1354                         * function.
1355                         */
1356                        lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1357                        if (!lva)
1358                                return -1;
1359                }
1360
1361                /*
1362                 * Build the remainder.
1363                 */
1364                lva->va_start = va->va_start;
1365                lva->va_end = nva_start_addr;
1366
1367                /*
1368                 * Shrink this VA to remaining size.
1369                 */
1370                va->va_start = nva_start_addr + size;
1371        } else {
1372                return -1;
1373        }
1374
1375        if (type != FL_FIT_TYPE) {
1376                augment_tree_propagate_from(va);
1377
1378                if (lva)        /* type == NE_FIT_TYPE */
1379                        insert_vmap_area_augment(lva, &va->rb_node,
1380                                &free_vmap_area_root, &free_vmap_area_list);
1381        }
1382
1383        return 0;
1384}
1385
1386/*
1387 * Returns the start address of the newly allocated area on success.
1388 * Otherwise "vend" is returned, which indicates failure.
1389 */
1390static __always_inline unsigned long
1391__alloc_vmap_area(unsigned long size, unsigned long align,
1392        unsigned long vstart, unsigned long vend)
1393{
1394        unsigned long nva_start_addr;
1395        struct vmap_area *va;
1396        enum fit_type type;
1397        int ret;
1398
1399        va = find_vmap_lowest_match(size, align, vstart);
1400        if (unlikely(!va))
1401                return vend;
1402
1403        if (va->va_start > vstart)
1404                nva_start_addr = ALIGN(va->va_start, align);
1405        else
1406                nva_start_addr = ALIGN(vstart, align);
1407
1408        /* Check the "vend" restriction. */
1409        if (nva_start_addr + size > vend)
1410                return vend;
1411
1412        /* Classify what we have found. */
1413        type = classify_va_fit_type(va, nva_start_addr, size);
1414        if (WARN_ON_ONCE(type == NOTHING_FIT))
1415                return vend;
1416
1417        /* Update the free vmap_area. */
1418        ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1419        if (ret)
1420                return vend;
1421
1422#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1423        find_vmap_lowest_match_check(size);
1424#endif
1425
1426        return nva_start_addr;
1427}
1428
1429/*
1430 * Free a region of KVA allocated by alloc_vmap_area
1431 */
1432static void free_vmap_area(struct vmap_area *va)
1433{
1434        /*
1435         * Remove from the busy tree/list.
1436         */
1437        spin_lock(&vmap_area_lock);
1438        unlink_va(va, &vmap_area_root);
1439        spin_unlock(&vmap_area_lock);
1440
1441        /*
1442         * Insert/Merge it back to the free tree/list.
1443         */
1444        spin_lock(&free_vmap_area_lock);
1445        merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1446        spin_unlock(&free_vmap_area_lock);
1447}
1448
1449static inline void
1450preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1451{
1452        struct vmap_area *va = NULL;
1453
1454        /*
1455         * Preload this CPU with one extra vmap_area object. It is used
1456         * when the fit type of a free area is NE_FIT_TYPE. It guarantees
1457         * that the CPU doing the allocation is preloaded.
1458         *
1459         * We do this in non-atomic context, which allows us to use more
1460         * permissive allocation masks and to be more stable under low
1461         * memory conditions and high memory pressure.
1462         */
1463        if (!this_cpu_read(ne_fit_preload_node))
1464                va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1465
1466        spin_lock(lock);
1467
1468        if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1469                kmem_cache_free(vmap_area_cachep, va);
1470}
1471
1472/*
1473 * Allocate a region of KVA of the specified size and alignment, within the
1474 * vstart and vend.
1475 */
1476static struct vmap_area *alloc_vmap_area(unsigned long size,
1477                                unsigned long align,
1478                                unsigned long vstart, unsigned long vend,
1479                                int node, gfp_t gfp_mask)
1480{
1481        struct vmap_area *va;
1482        unsigned long addr;
1483        int purged = 0;
1484        int ret;
1485
1486        BUG_ON(!size);
1487        BUG_ON(offset_in_page(size));
1488        BUG_ON(!is_power_of_2(align));
1489
1490        if (unlikely(!vmap_initialized))
1491                return ERR_PTR(-EBUSY);
1492
1493        might_sleep();
1494        gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1495
1496        va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1497        if (unlikely(!va))
1498                return ERR_PTR(-ENOMEM);
1499
1500        /*
1501         * Only scan the relevant parts containing pointers to other objects
1502         * to avoid false negatives.
1503         */
1504        kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1505
1506retry:
1507        preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1508        addr = __alloc_vmap_area(size, align, vstart, vend);
1509        spin_unlock(&free_vmap_area_lock);
1510
1511        /*
1512         * If the allocation fails, the "vend" address is
1513         * returned. In that case, trigger the overflow path.
1514         */
1515        if (unlikely(addr == vend))
1516                goto overflow;
1517
1518        va->va_start = addr;
1519        va->va_end = addr + size;
1520        va->vm = NULL;
1521
1522        spin_lock(&vmap_area_lock);
1523        insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1524        spin_unlock(&vmap_area_lock);
1525
1526        BUG_ON(!IS_ALIGNED(va->va_start, align));
1527        BUG_ON(va->va_start < vstart);
1528        BUG_ON(va->va_end > vend);
1529
1530        ret = kasan_populate_vmalloc(addr, size);
1531        if (ret) {
1532                free_vmap_area(va);
1533                return ERR_PTR(ret);
1534        }
1535
1536        return va;
1537
1538overflow:
1539        if (!purged) {
1540                purge_vmap_area_lazy();
1541                purged = 1;
1542                goto retry;
1543        }
1544
1545        if (gfpflags_allow_blocking(gfp_mask)) {
1546                unsigned long freed = 0;
1547                blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1548                if (freed > 0) {
1549                        purged = 0;
1550                        goto retry;
1551                }
1552        }
1553
1554        if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1555                pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1556                        size);
1557
1558        kmem_cache_free(vmap_area_cachep, va);
1559        return ERR_PTR(-EBUSY);
1560}
1561
1562int register_vmap_purge_notifier(struct notifier_block *nb)
1563{
1564        return blocking_notifier_chain_register(&vmap_notify_list, nb);
1565}
1566EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1567
1568int unregister_vmap_purge_notifier(struct notifier_block *nb)
1569{
1570        return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1571}
1572EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
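
/*
 * Illustrative sketch (not from this file): how a subsystem might hook the
 * purge notifier chain registered above.  The callback name and the
 * example_drop_cache() helper are hypothetical.  The chain is invoked from
 * alloc_vmap_area()'s overflow path with a pointer to an "unsigned long
 * freed" counter; reporting a non-zero amount makes the allocator retry.
 */
#include <linux/notifier.h>
#include <linux/vmalloc.h>

unsigned long example_drop_cache(void);	/* hypothetical cache shrinker */

static int example_vmap_purge_cb(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	unsigned long *freed = data;

	*freed += example_drop_cache();
	return NOTIFY_OK;
}

static struct notifier_block example_vmap_purge_nb = {
	.notifier_call = example_vmap_purge_cb,
};

/* init:     register_vmap_purge_notifier(&example_vmap_purge_nb);   */
/* teardown: unregister_vmap_purge_notifier(&example_vmap_purge_nb); */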
1573
1574/*
1575 * lazy_max_pages is the maximum amount of virtual address space we gather up
1576 * before attempting to purge with a TLB flush.
1577 *
1578 * There is a tradeoff here: a larger number will cover more kernel page tables
1579 * and take slightly longer to purge, but it will linearly reduce the number of
1580 * global TLB flushes that must be performed. It would seem natural to scale
1581 * this number up linearly with the number of CPUs (because vmapping activity
1582 * could also scale linearly with the number of CPUs), however it is likely
1583 * that in practice, workloads might be constrained in other ways that mean
1584 * vmap activity will not scale linearly with CPUs. Also, I want to be
1585 * conservative and not introduce a big latency on huge systems, so go with
1586 * a less aggressive log scale. It will still be an improvement over the old
1587 * code, and it will be simple to change the scale factor if we find that it
1588 * becomes a problem on bigger systems.
1589 */
1590static unsigned long lazy_max_pages(void)
1591{
1592        unsigned int log;
1593
1594        log = fls(num_online_cpus());
1595
1596        return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1597}
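
/*
 * Worked example (illustrative, assuming 4 KiB pages): 32 MiB / 4 KiB is
 * 8192 pages per "log step".  With 16 online CPUs, fls(16) = 5, so
 * lazy_max_pages() = 5 * 8192 = 40960 pages (~160 MiB) of lazily freed KVA
 * may accumulate before a purge is triggered; with 2 CPUs it is
 * 2 * 8192 = 16384 pages (~64 MiB).
 */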
1598
1599static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1600
1601/*
1602 * Serialize vmap purging.  There is no actual critical section protected
1603 * by this lock, but we want to avoid concurrent calls for performance
1604 * reasons and to make pcpu_get_vm_areas() more deterministic.
1605 */
1606static DEFINE_MUTEX(vmap_purge_lock);
1607
1608/* for per-CPU blocks */
1609static void purge_fragmented_blocks_allcpus(void);
1610
1611#ifdef CONFIG_X86_64
1612/*
1613 * called before a call to iounmap() if the caller wants vm_area_struct's
1614 * immediately freed.
1615 */
1616void set_iounmap_nonlazy(void)
1617{
1618        atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1619}
1620#endif /* CONFIG_X86_64 */
1621
1622/*
1623 * Purges all lazily-freed vmap areas.
1624 */
1625static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1626{
1627        unsigned long resched_threshold;
1628        struct list_head local_purge_list;
1629        struct vmap_area *va, *n_va;
1630
1631        lockdep_assert_held(&vmap_purge_lock);
1632
1633        spin_lock(&purge_vmap_area_lock);
1634        purge_vmap_area_root = RB_ROOT;
1635        list_replace_init(&purge_vmap_area_list, &local_purge_list);
1636        spin_unlock(&purge_vmap_area_lock);
1637
1638        if (unlikely(list_empty(&local_purge_list)))
1639                return false;
1640
1641        start = min(start,
1642                list_first_entry(&local_purge_list,
1643                        struct vmap_area, list)->va_start);
1644
1645        end = max(end,
1646                list_last_entry(&local_purge_list,
1647                        struct vmap_area, list)->va_end);
1648
1649        flush_tlb_kernel_range(start, end);
1650        resched_threshold = lazy_max_pages() << 1;
1651
1652        spin_lock(&free_vmap_area_lock);
1653        list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
1654                unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1655                unsigned long orig_start = va->va_start;
1656                unsigned long orig_end = va->va_end;
1657
1658                /*
1659                 * Finally insert or merge lazily-freed area. It is
1660                 * detached and there is no need to "unlink" it from
1661                 * anything.
1662                 */
1663                va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
1664                                &free_vmap_area_list);
1665
1666                if (!va)
1667                        continue;
1668
1669                if (is_vmalloc_or_module_addr((void *)orig_start))
1670                        kasan_release_vmalloc(orig_start, orig_end,
1671                                              va->va_start, va->va_end);
1672
1673                atomic_long_sub(nr, &vmap_lazy_nr);
1674
1675                if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1676                        cond_resched_lock(&free_vmap_area_lock);
1677        }
1678        spin_unlock(&free_vmap_area_lock);
1679        return true;
1680}
1681
1682/*
1683 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1684 * is already purging.
1685 */
1686static void try_purge_vmap_area_lazy(void)
1687{
1688        if (mutex_trylock(&vmap_purge_lock)) {
1689                __purge_vmap_area_lazy(ULONG_MAX, 0);
1690                mutex_unlock(&vmap_purge_lock);
1691        }
1692}
1693
1694/*
1695 * Kick off a purge of the outstanding lazy areas.
1696 */
1697static void purge_vmap_area_lazy(void)
1698{
1699        mutex_lock(&vmap_purge_lock);
1700        purge_fragmented_blocks_allcpus();
1701        __purge_vmap_area_lazy(ULONG_MAX, 0);
1702        mutex_unlock(&vmap_purge_lock);
1703}
1704
1705/*
1706 * Free a vmap area; the caller must ensure that the area has been unmapped
1707 * and that flush_cache_vunmap() has been called for the correct range
1708 * previously.
1709 */
1710static void free_vmap_area_noflush(struct vmap_area *va)
1711{
1712        unsigned long nr_lazy;
1713
1714        spin_lock(&vmap_area_lock);
1715        unlink_va(va, &vmap_area_root);
1716        spin_unlock(&vmap_area_lock);
1717
1718        nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1719                                PAGE_SHIFT, &vmap_lazy_nr);
1720
1721        /*
1722         * Merge or place it to the purge tree/list.
1723         */
1724        spin_lock(&purge_vmap_area_lock);
1725        merge_or_add_vmap_area(va,
1726                &purge_vmap_area_root, &purge_vmap_area_list);
1727        spin_unlock(&purge_vmap_area_lock);
1728
1729        /* After this point, we may free va at any time */
1730        if (unlikely(nr_lazy > lazy_max_pages()))
1731                try_purge_vmap_area_lazy();
1732}
1733
1734/*
1735 * Free and unmap a vmap area
1736 */
1737static void free_unmap_vmap_area(struct vmap_area *va)
1738{
1739        flush_cache_vunmap(va->va_start, va->va_end);
1740        vunmap_range_noflush(va->va_start, va->va_end);
1741        if (debug_pagealloc_enabled_static())
1742                flush_tlb_kernel_range(va->va_start, va->va_end);
1743
1744        free_vmap_area_noflush(va);
1745}
1746
1747static struct vmap_area *find_vmap_area(unsigned long addr)
1748{
1749        struct vmap_area *va;
1750
1751        spin_lock(&vmap_area_lock);
1752        va = __find_vmap_area(addr);
1753        spin_unlock(&vmap_area_lock);
1754
1755        return va;
1756}
1757
1758/*** Per cpu kva allocator ***/
1759
1760/*
1761 * vmap space is limited especially on 32 bit architectures. Ensure there is
1762 * room for at least 16 percpu vmap blocks per CPU.
1763 */
1764/*
1765 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1766 * to #define VMALLOC_SPACE             (VMALLOC_END-VMALLOC_START). Guess
1767 * instead (we just need a rough idea)
1768 */
1769#if BITS_PER_LONG == 32
1770#define VMALLOC_SPACE           (128UL*1024*1024)
1771#else
1772#define VMALLOC_SPACE           (128UL*1024*1024*1024)
1773#endif
1774
1775#define VMALLOC_PAGES           (VMALLOC_SPACE / PAGE_SIZE)
1776#define VMAP_MAX_ALLOC          BITS_PER_LONG   /* 256K with 4K pages */
1777#define VMAP_BBMAP_BITS_MAX     1024    /* 4MB with 4K pages */
1778#define VMAP_BBMAP_BITS_MIN     (VMAP_MAX_ALLOC*2)
1779#define VMAP_MIN(x, y)          ((x) < (y) ? (x) : (y)) /* can't use min() */
1780#define VMAP_MAX(x, y)          ((x) > (y) ? (x) : (y)) /* can't use max() */
1781#define VMAP_BBMAP_BITS         \
1782                VMAP_MIN(VMAP_BBMAP_BITS_MAX,   \
1783                VMAP_MAX(VMAP_BBMAP_BITS_MIN,   \
1784                        VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1785
1786#define VMAP_BLOCK_SIZE         (VMAP_BBMAP_BITS * PAGE_SIZE)
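
/*
 * Worked example (illustrative, assuming 4 KiB pages): on 64-bit with
 * NR_CPUS = 64, VMALLOC_PAGES is guessed as 128 GiB / 4 KiB = 32M pages;
 * 32M / 64 / 16 = 32768 bits, clamped to VMAP_BBMAP_BITS_MAX, so
 * VMAP_BBMAP_BITS = 1024 and VMAP_BLOCK_SIZE = 4 MiB.  On 32-bit with
 * NR_CPUS = 8, 32768 pages / 8 / 16 = 256 bits, i.e. a 1 MiB block.
 */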
1787
1788struct vmap_block_queue {
1789        spinlock_t lock;
1790        struct list_head free;
1791};
1792
1793struct vmap_block {
1794        spinlock_t lock;
1795        struct vmap_area *va;
1796        unsigned long free, dirty;
1797        unsigned long dirty_min, dirty_max; /*< dirty range */
1798        struct list_head free_list;
1799        struct rcu_head rcu_head;
1800        struct list_head purge;
1801};
1802
1803/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1804static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1805
1806/*
1807 * XArray of vmap blocks, indexed by address, to quickly find a vmap block
1808 * in the free path. Could get rid of this if we change the API to return a
1809 * "cookie" from alloc, to be passed to free. But no big deal yet.
1810 */
1811static DEFINE_XARRAY(vmap_blocks);
1812
1813/*
1814 * We should probably have a fallback mechanism to allocate virtual memory
1815 * out of partially filled vmap blocks. However vmap block sizing should be
1816 * fairly reasonable according to the vmalloc size, so it shouldn't be a
1817 * big problem.
1818 */
1819
1820static unsigned long addr_to_vb_idx(unsigned long addr)
1821{
1822        addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1823        addr /= VMAP_BLOCK_SIZE;
1824        return addr;
1825}
1826
1827static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1828{
1829        unsigned long addr;
1830
1831        addr = va_start + (pages_off << PAGE_SHIFT);
1832        BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1833        return (void *)addr;
1834}
1835
1836/**
1837 * new_vmap_block - allocates a new vmap_block and occupies 2^order pages in this
1838 *                  block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
1839 * @order:    how many 2^order pages should be occupied in newly allocated block
1840 * @gfp_mask: flags for the page level allocator
1841 *
1842 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1843 */
1844static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1845{
1846        struct vmap_block_queue *vbq;
1847        struct vmap_block *vb;
1848        struct vmap_area *va;
1849        unsigned long vb_idx;
1850        int node, err;
1851        void *vaddr;
1852
1853        node = numa_node_id();
1854
1855        vb = kmalloc_node(sizeof(struct vmap_block),
1856                        gfp_mask & GFP_RECLAIM_MASK, node);
1857        if (unlikely(!vb))
1858                return ERR_PTR(-ENOMEM);
1859
1860        va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1861                                        VMALLOC_START, VMALLOC_END,
1862                                        node, gfp_mask);
1863        if (IS_ERR(va)) {
1864                kfree(vb);
1865                return ERR_CAST(va);
1866        }
1867
1868        vaddr = vmap_block_vaddr(va->va_start, 0);
1869        spin_lock_init(&vb->lock);
1870        vb->va = va;
1871        /* At least something should be left free */
1872        BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1873        vb->free = VMAP_BBMAP_BITS - (1UL << order);
1874        vb->dirty = 0;
1875        vb->dirty_min = VMAP_BBMAP_BITS;
1876        vb->dirty_max = 0;
1877        INIT_LIST_HEAD(&vb->free_list);
1878
1879        vb_idx = addr_to_vb_idx(va->va_start);
1880        err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
1881        if (err) {
1882                kfree(vb);
1883                free_vmap_area(va);
1884                return ERR_PTR(err);
1885        }
1886
1887        vbq = &get_cpu_var(vmap_block_queue);
1888        spin_lock(&vbq->lock);
1889        list_add_tail_rcu(&vb->free_list, &vbq->free);
1890        spin_unlock(&vbq->lock);
1891        put_cpu_var(vmap_block_queue);
1892
1893        return vaddr;
1894}
1895
1896static void free_vmap_block(struct vmap_block *vb)
1897{
1898        struct vmap_block *tmp;
1899
1900        tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
1901        BUG_ON(tmp != vb);
1902
1903        free_vmap_area_noflush(vb->va);
1904        kfree_rcu(vb, rcu_head);
1905}
1906
1907static void purge_fragmented_blocks(int cpu)
1908{
1909        LIST_HEAD(purge);
1910        struct vmap_block *vb;
1911        struct vmap_block *n_vb;
1912        struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1913
1914        rcu_read_lock();
1915        list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1916
1917                if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1918                        continue;
1919
1920                spin_lock(&vb->lock);
1921                if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1922                        vb->free = 0; /* prevent further allocs after releasing lock */
1923                        vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1924                        vb->dirty_min = 0;
1925                        vb->dirty_max = VMAP_BBMAP_BITS;
1926                        spin_lock(&vbq->lock);
1927                        list_del_rcu(&vb->free_list);
1928                        spin_unlock(&vbq->lock);
1929                        spin_unlock(&vb->lock);
1930                        list_add_tail(&vb->purge, &purge);
1931                } else
1932                        spin_unlock(&vb->lock);
1933        }
1934        rcu_read_unlock();
1935
1936        list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1937                list_del(&vb->purge);
1938                free_vmap_block(vb);
1939        }
1940}
1941
1942static void purge_fragmented_blocks_allcpus(void)
1943{
1944        int cpu;
1945
1946        for_each_possible_cpu(cpu)
1947                purge_fragmented_blocks(cpu);
1948}
1949
1950static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1951{
1952        struct vmap_block_queue *vbq;
1953        struct vmap_block *vb;
1954        void *vaddr = NULL;
1955        unsigned int order;
1956
1957        BUG_ON(offset_in_page(size));
1958        BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1959        if (WARN_ON(size == 0)) {
1960                /*
1961                 * Allocating 0 bytes isn't what the caller wants since
1962                 * get_order(0) returns a funny result. Just warn and terminate
1963                 * early.
1964                 */
1965                return NULL;
1966        }
1967        order = get_order(size);
1968
1969        rcu_read_lock();
1970        vbq = &get_cpu_var(vmap_block_queue);
1971        list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1972                unsigned long pages_off;
1973
1974                spin_lock(&vb->lock);
1975                if (vb->free < (1UL << order)) {
1976                        spin_unlock(&vb->lock);
1977                        continue;
1978                }
1979
1980                pages_off = VMAP_BBMAP_BITS - vb->free;
1981                vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1982                vb->free -= 1UL << order;
1983                if (vb->free == 0) {
1984                        spin_lock(&vbq->lock);
1985                        list_del_rcu(&vb->free_list);
1986                        spin_unlock(&vbq->lock);
1987                }
1988
1989                spin_unlock(&vb->lock);
1990                break;
1991        }
1992
1993        put_cpu_var(vmap_block_queue);
1994        rcu_read_unlock();
1995
1996        /* Allocate new block if nothing was found */
1997        if (!vaddr)
1998                vaddr = new_vmap_block(order, gfp_mask);
1999
2000        return vaddr;
2001}
2002
2003static void vb_free(unsigned long addr, unsigned long size)
2004{
2005        unsigned long offset;
2006        unsigned int order;
2007        struct vmap_block *vb;
2008
2009        BUG_ON(offset_in_page(size));
2010        BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2011
2012        flush_cache_vunmap(addr, addr + size);
2013
2014        order = get_order(size);
2015        offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2016        vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
2017
2018        vunmap_range_noflush(addr, addr + size);
2019
2020        if (debug_pagealloc_enabled_static())
2021                flush_tlb_kernel_range(addr, addr + size);
2022
2023        spin_lock(&vb->lock);
2024
2025        /* Expand dirty range */
2026        vb->dirty_min = min(vb->dirty_min, offset);
2027        vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2028
2029        vb->dirty += 1UL << order;
2030        if (vb->dirty == VMAP_BBMAP_BITS) {
2031                BUG_ON(vb->free);
2032                spin_unlock(&vb->lock);
2033                free_vmap_block(vb);
2034        } else
2035                spin_unlock(&vb->lock);
2036}
2037
2038static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2039{
2040        int cpu;
2041
2042        if (unlikely(!vmap_initialized))
2043                return;
2044
2045        might_sleep();
2046
2047        for_each_possible_cpu(cpu) {
2048                struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2049                struct vmap_block *vb;
2050
2051                rcu_read_lock();
2052                list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2053                        spin_lock(&vb->lock);
2054                        if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
2055                                unsigned long va_start = vb->va->va_start;
2056                                unsigned long s, e;
2057
2058                                s = va_start + (vb->dirty_min << PAGE_SHIFT);
2059                                e = va_start + (vb->dirty_max << PAGE_SHIFT);
2060
2061                                start = min(s, start);
2062                                end   = max(e, end);
2063
2064                                flush = 1;
2065                        }
2066                        spin_unlock(&vb->lock);
2067                }
2068                rcu_read_unlock();
2069        }
2070
2071        mutex_lock(&vmap_purge_lock);
2072        purge_fragmented_blocks_allcpus();
2073        if (!__purge_vmap_area_lazy(start, end) && flush)
2074                flush_tlb_kernel_range(start, end);
2075        mutex_unlock(&vmap_purge_lock);
2076}
2077
2078/**
2079 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2080 *
2081 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2082 * to amortize TLB flushing overheads. What this means is that any page you
2083 * have now may, in a former life, have been mapped into a kernel virtual
2084 * address by the vmap layer, so there might be some CPUs with TLB entries
2085 * still referencing that page (in addition to the regular 1:1 kernel mapping).
2086 *
2087 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2088 * be sure that none of the pages we have control over will have any aliases
2089 * from the vmap layer.
2090 */
2091void vm_unmap_aliases(void)
2092{
2093        unsigned long start = ULONG_MAX, end = 0;
2094        int flush = 0;
2095
2096        _vm_unmap_aliases(start, end, flush);
2097}
2098EXPORT_SYMBOL_GPL(vm_unmap_aliases);
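
/*
 * Illustrative sketch (not from this file): a hypothetical caller that is
 * about to change the protection of pages it owns and wants to be sure no
 * stale lazy vmap aliases to those pages remain.  set_memory_ro() is only
 * available with CONFIG_ARCH_HAS_SET_MEMORY.
 */
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

static int example_protect_buffer(void *buf, int nr_pages)
{
	/* Drop any lazily kept vmap aliases before changing permissions. */
	vm_unmap_aliases();

	return set_memory_ro((unsigned long)buf, nr_pages);
}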
2099
2100/**
2101 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2102 * @mem: the pointer returned by vm_map_ram
2103 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2104 */
2105void vm_unmap_ram(const void *mem, unsigned int count)
2106{
2107        unsigned long size = (unsigned long)count << PAGE_SHIFT;
2108        unsigned long addr = (unsigned long)mem;
2109        struct vmap_area *va;
2110
2111        might_sleep();
2112        BUG_ON(!addr);
2113        BUG_ON(addr < VMALLOC_START);
2114        BUG_ON(addr > VMALLOC_END);
2115        BUG_ON(!PAGE_ALIGNED(addr));
2116
2117        kasan_poison_vmalloc(mem, size);
2118
2119        if (likely(count <= VMAP_MAX_ALLOC)) {
2120                debug_check_no_locks_freed(mem, size);
2121                vb_free(addr, size);
2122                return;
2123        }
2124
2125        va = find_vmap_area(addr);
2126        BUG_ON(!va);
2127        debug_check_no_locks_freed((void *)va->va_start,
2128                                    (va->va_end - va->va_start));
2129        free_unmap_vmap_area(va);
2130}
2131EXPORT_SYMBOL(vm_unmap_ram);
2132
2133/**
2134 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2135 * @pages: an array of pointers to the pages to be mapped
2136 * @count: number of pages
2137 * @node: prefer to allocate data structures on this node
2138 *
2139 * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
2140 * faster than vmap(), so it is preferred.  But if you mix long-lived and
2141 * short-lived objects with vm_map_ram(), it can consume lots of address
2142 * space through fragmentation (especially on a 32bit machine), and you may
2143 * eventually see allocation failures.  Please use it only for short-lived objects.
2144 *
2145 * Returns: a pointer to the address that has been mapped, or %NULL on failure
2146 */
2147void *vm_map_ram(struct page **pages, unsigned int count, int node)
2148{
2149        unsigned long size = (unsigned long)count << PAGE_SHIFT;
2150        unsigned long addr;
2151        void *mem;
2152
2153        if (likely(count <= VMAP_MAX_ALLOC)) {
2154                mem = vb_alloc(size, GFP_KERNEL);
2155                if (IS_ERR(mem))
2156                        return NULL;
2157                addr = (unsigned long)mem;
2158        } else {
2159                struct vmap_area *va;
2160                va = alloc_vmap_area(size, PAGE_SIZE,
2161                                VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
2162                if (IS_ERR(va))
2163                        return NULL;
2164
2165                addr = va->va_start;
2166                mem = (void *)addr;
2167        }
2168
2169        kasan_unpoison_vmalloc(mem, size);
2170
2171        if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2172                                pages, PAGE_SHIFT) < 0) {
2173                vm_unmap_ram(mem, count);
2174                return NULL;
2175        }
2176
2177        return mem;
2178}
2179EXPORT_SYMBOL(vm_map_ram);
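
/*
 * Illustrative sketch (not from this file): temporarily mapping a small
 * scatter of pages with vm_map_ram() and tearing the mapping down again.
 * The example_use_mapping() callback is hypothetical.
 */
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>

static int example_map_pages_briefly(struct page **pages, unsigned int count,
				     int (*example_use_mapping)(void *addr))
{
	void *addr;
	int ret;

	addr = vm_map_ram(pages, count, NUMA_NO_NODE);
	if (!addr)
		return -ENOMEM;

	ret = example_use_mapping(addr);

	/* Must pass the same count that was given to vm_map_ram(). */
	vm_unmap_ram(addr, count);
	return ret;
}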
2180
2181static struct vm_struct *vmlist __initdata;
2182
2183static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2184{
2185#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2186        return vm->page_order;
2187#else
2188        return 0;
2189#endif
2190}
2191
2192static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2193{
2194#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2195        vm->page_order = order;
2196#else
2197        BUG_ON(order != 0);
2198#endif
2199}
2200
2201/**
2202 * vm_area_add_early - add vmap area early during boot
2203 * @vm: vm_struct to add
2204 *
2205 * This function is used to add fixed kernel vm area to vmlist before
2206 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
2207 * should contain proper values and the other fields should be zero.
2208 *
2209 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2210 */
2211void __init vm_area_add_early(struct vm_struct *vm)
2212{
2213        struct vm_struct *tmp, **p;
2214
2215        BUG_ON(vmap_initialized);
2216        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2217                if (tmp->addr >= vm->addr) {
2218                        BUG_ON(tmp->addr < vm->addr + vm->size);
2219                        break;
2220                } else
2221                        BUG_ON(tmp->addr + tmp->size > vm->addr);
2222        }
2223        vm->next = *p;
2224        *p = vm;
2225}
2226
2227/**
2228 * vm_area_register_early - register vmap area early during boot
2229 * @vm: vm_struct to register
2230 * @align: requested alignment
2231 *
2232 * This function is used to register kernel vm area before
2233 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
2234 * proper values on entry and other fields should be zero.  On return,
2235 * vm->addr contains the allocated address.
2236 *
2237 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2238 */
2239void __init vm_area_register_early(struct vm_struct *vm, size_t align)
2240{
2241        static size_t vm_init_off __initdata;
2242        unsigned long addr;
2243
2244        addr = ALIGN(VMALLOC_START + vm_init_off, align);
2245        vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
2246
2247        vm->addr = (void *)addr;
2248
2249        vm_area_add_early(vm);
2250}
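
/*
 * Illustrative sketch (not from this file): how early boot code might
 * reserve a fixed chunk of vmalloc space before vmalloc_init() runs,
 * similar in spirit to the percpu first-chunk setup.  The area and its
 * size are hypothetical.
 */
#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>

static struct vm_struct example_early_vm;

void __init example_reserve_early_area(void)
{
	example_early_vm.flags = VM_ALLOC;
	example_early_vm.size = SZ_1M;

	/* On return, example_early_vm.addr holds the chosen address. */
	vm_area_register_early(&example_early_vm, PAGE_SIZE);
}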
2251
2252static void vmap_init_free_space(void)
2253{
2254        unsigned long vmap_start = 1;
2255        const unsigned long vmap_end = ULONG_MAX;
2256        struct vmap_area *busy, *free;
2257
2258        /*
2259         *     B     F     B     B     B     F
2260         * -|-----|.....|-----|-----|-----|.....|-
2261         *  |           The KVA space           |
2262         *  |<--------------------------------->|
2263         */
2264        list_for_each_entry(busy, &vmap_area_list, list) {
2265                if (busy->va_start - vmap_start > 0) {
2266                        free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2267                        if (!WARN_ON_ONCE(!free)) {
2268                                free->va_start = vmap_start;
2269                                free->va_end = busy->va_start;
2270
2271                                insert_vmap_area_augment(free, NULL,
2272                                        &free_vmap_area_root,
2273                                                &free_vmap_area_list);
2274                        }
2275                }
2276
2277                vmap_start = busy->va_end;
2278        }
2279
2280        if (vmap_end - vmap_start > 0) {
2281                free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2282                if (!WARN_ON_ONCE(!free)) {
2283                        free->va_start = vmap_start;
2284                        free->va_end = vmap_end;
2285
2286                        insert_vmap_area_augment(free, NULL,
2287                                &free_vmap_area_root,
2288                                        &free_vmap_area_list);
2289                }
2290        }
2291}
2292
2293void __init vmalloc_init(void)
2294{
2295        struct vmap_area *va;
2296        struct vm_struct *tmp;
2297        int i;
2298
2299        /*
2300         * Create the cache for vmap_area objects.
2301         */
2302        vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
2303
2304        for_each_possible_cpu(i) {
2305                struct vmap_block_queue *vbq;
2306                struct vfree_deferred *p;
2307
2308                vbq = &per_cpu(vmap_block_queue, i);
2309                spin_lock_init(&vbq->lock);
2310                INIT_LIST_HEAD(&vbq->free);
2311                p = &per_cpu(vfree_deferred, i);
2312                init_llist_head(&p->list);
2313                INIT_WORK(&p->wq, free_work);
2314        }
2315
2316        /* Import existing vmlist entries. */
2317        for (tmp = vmlist; tmp; tmp = tmp->next) {
2318                va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2319                if (WARN_ON_ONCE(!va))
2320                        continue;
2321
2322                va->va_start = (unsigned long)tmp->addr;
2323                va->va_end = va->va_start + tmp->size;
2324                va->vm = tmp;
2325                insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
2326        }
2327
2328        /*
2329         * Now we can initialize a free vmap space.
2330         */
2331        vmap_init_free_space();
2332        vmap_initialized = true;
2333}
2334
2335static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2336        struct vmap_area *va, unsigned long flags, const void *caller)
2337{
2338        vm->flags = flags;
2339        vm->addr = (void *)va->va_start;
2340        vm->size = va->va_end - va->va_start;
2341        vm->caller = caller;
2342        va->vm = vm;
2343}
2344
2345static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2346                              unsigned long flags, const void *caller)
2347{
2348        spin_lock(&vmap_area_lock);
2349        setup_vmalloc_vm_locked(vm, va, flags, caller);
2350        spin_unlock(&vmap_area_lock);
2351}
2352
2353static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2354{
2355        /*
2356         * Before removing VM_UNINITIALIZED,
2357         * we should make sure that vm has proper values.
2358         * Pair with smp_rmb() in show_numa_info().
2359         */
2360        smp_wmb();
2361        vm->flags &= ~VM_UNINITIALIZED;
2362}
2363
2364static struct vm_struct *__get_vm_area_node(unsigned long size,
2365                unsigned long align, unsigned long shift, unsigned long flags,
2366                unsigned long start, unsigned long end, int node,
2367                gfp_t gfp_mask, const void *caller)
2368{
2369        struct vmap_area *va;
2370        struct vm_struct *area;
2371        unsigned long requested_size = size;
2372
2373        BUG_ON(in_interrupt());
2374        size = ALIGN(size, 1ul << shift);
2375        if (unlikely(!size))
2376                return NULL;
2377
2378        if (flags & VM_IOREMAP)
2379                align = 1ul << clamp_t(int, get_count_order_long(size),
2380                                       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2381
2382        area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2383        if (unlikely(!area))
2384                return NULL;
2385
2386        if (!(flags & VM_NO_GUARD))
2387                size += PAGE_SIZE;
2388
2389        va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2390        if (IS_ERR(va)) {
2391                kfree(area);
2392                return NULL;
2393        }
2394
2395        kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2396
2397        setup_vmalloc_vm(area, va, flags, caller);
2398
2399        return area;
2400}
2401
2402struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2403                                       unsigned long start, unsigned long end,
2404                                       const void *caller)
2405{
2406        return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
2407                                  NUMA_NO_NODE, GFP_KERNEL, caller);
2408}
2409
2410/**
2411 * get_vm_area - reserve a contiguous kernel virtual area
2412 * @size:        size of the area
2413 * @flags:       %VM_IOREMAP for I/O mappings or VM_ALLOC
2414 *
2415 * Search an area of @size in the kernel virtual mapping area,
2416 * and reserve it for our purposes.  Returns the area descriptor
2417 * on success or %NULL on failure.
2418 *
2419 * Return: the area descriptor on success or %NULL on failure.
2420 */
2421struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2422{
2423        return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2424                                  VMALLOC_START, VMALLOC_END,
2425                                  NUMA_NO_NODE, GFP_KERNEL,
2426                                  __builtin_return_address(0));
2427}
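
/*
 * Illustrative sketch (not from this file): reserving a chunk of kernel
 * virtual address space without backing it with pages, then releasing it
 * with free_vm_area().  Whatever gets mapped into the reserved range
 * (ioremap, pages, ...) is up to the caller and is omitted here.
 */
#include <linux/errno.h>
#include <linux/vmalloc.h>

static int example_reserve_kva(unsigned long size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return -ENOMEM;

	/*
	 * area->addr .. area->addr + area->size is now reserved; area->size
	 * includes a one-page guard unless VM_NO_GUARD was requested.
	 */

	free_vm_area(area);
	return 0;
}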
2428
2429struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2430                                const void *caller)
2431{
2432        return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2433                                  VMALLOC_START, VMALLOC_END,
2434                                  NUMA_NO_NODE, GFP_KERNEL, caller);
2435}
2436
2437/**
2438 * find_vm_area - find a contiguous kernel virtual area
2439 * @addr:         base address
2440 *
2441 * Search for the kernel VM area starting at @addr, and return it.
2442 * It is up to the caller to do all required locking to keep the returned
2443 * pointer valid.
2444 *
2445 * Return: the area descriptor on success or %NULL on failure.
2446 */
2447struct vm_struct *find_vm_area(const void *addr)
2448{
2449        struct vmap_area *va;
2450
2451        va = find_vmap_area((unsigned long)addr);
2452        if (!va)
2453                return NULL;
2454
2455        return va->vm;
2456}
2457
2458/**
2459 * remove_vm_area - find and remove a contiguous kernel virtual area
2460 * @addr:           base address
2461 *
2462 * Search for the kernel VM area starting at @addr, and remove it.
2463 * This function returns the found VM area, but using it is NOT safe
2464 * on SMP machines, except for its size or flags.
2465 *
2466 * Return: the area descriptor on success or %NULL on failure.
2467 */
2468struct vm_struct *remove_vm_area(const void *addr)
2469{
2470        struct vmap_area *va;
2471
2472        might_sleep();
2473
2474        spin_lock(&vmap_area_lock);
2475        va = __find_vmap_area((unsigned long)addr);
2476        if (va && va->vm) {
2477                struct vm_struct *vm = va->vm;
2478
2479                va->vm = NULL;
2480                spin_unlock(&vmap_area_lock);
2481
2482                kasan_free_shadow(vm);
2483                free_unmap_vmap_area(va);
2484
2485                return vm;
2486        }
2487
2488        spin_unlock(&vmap_area_lock);
2489        return NULL;
2490}
2491
2492static inline void set_area_direct_map(const struct vm_struct *area,
2493                                       int (*set_direct_map)(struct page *page))
2494{
2495        int i;
2496
2497        /* HUGE_VMALLOC passes small pages to set_direct_map */
2498        for (i = 0; i < area->nr_pages; i++)
2499                if (page_address(area->pages[i]))
2500                        set_direct_map(area->pages[i]);
2501}
2502
2503/* Handle removing and resetting vm mappings related to the vm_struct. */
2504static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2505{
2506        unsigned long start = ULONG_MAX, end = 0;
2507        unsigned int page_order = vm_area_page_order(area);
2508        int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2509        int flush_dmap = 0;
2510        int i;
2511
2512        remove_vm_area(area->addr);
2513
2514        /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2515        if (!flush_reset)
2516                return;
2517
2518        /*
2519         * If not deallocating pages, just do the flush of the VM area and
2520         * return.
2521         */
2522        if (!deallocate_pages) {
2523                vm_unmap_aliases();
2524                return;
2525        }
2526
2527        /*
2528         * If execution gets here, flush the vm mapping and reset the direct
2529         * map. Find the start and end range of the direct mappings to make sure
2530         * the vm_unmap_aliases() flush includes the direct map.
2531         */
2532        for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2533                unsigned long addr = (unsigned long)page_address(area->pages[i]);
2534                if (addr) {
2535                        unsigned long page_size;
2536
2537                        page_size = PAGE_SIZE << page_order;
2538                        start = min(addr, start);
2539                        end = max(addr + page_size, end);
2540                        flush_dmap = 1;
2541                }
2542        }
2543
2544        /*
2545         * Set direct map to something invalid so that it won't be cached if
2546         * there are any accesses after the TLB flush, then flush the TLB and
2547         * reset the direct map permissions to the default.
2548         */
2549        set_area_direct_map(area, set_direct_map_invalid_noflush);
2550        _vm_unmap_aliases(start, end, flush_dmap);
2551        set_area_direct_map(area, set_direct_map_default_noflush);
2552}
2553
2554static void __vunmap(const void *addr, int deallocate_pages)
2555{
2556        struct vm_struct *area;
2557
2558        if (!addr)
2559                return;
2560
2561        if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2562                        addr))
2563                return;
2564
2565        area = find_vm_area(addr);
2566        if (unlikely(!area)) {
2567                WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2568                                addr);
2569                return;
2570        }
2571
2572        debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2573        debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2574
2575        kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
2576
2577        vm_remove_mappings(area, deallocate_pages);
2578
2579        if (deallocate_pages) {
2580                unsigned int page_order = vm_area_page_order(area);
2581                int i;
2582
2583                for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2584                        struct page *page = area->pages[i];
2585
2586                        BUG_ON(!page);
2587                        __free_pages(page, page_order);
2588                        cond_resched();
2589                }
2590                atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2591
2592                kvfree(area->pages);
2593        }
2594
2595        kfree(area);
2596}
2597
2598static inline void __vfree_deferred(const void *addr)
2599{
2600        /*
2601         * Use raw_cpu_ptr() because this can be called from preemptible
2602         * context. Preemption is absolutely fine here, because the llist_add()
2603         * implementation is lockless, so it works even if we are adding to
2604         * another cpu's list. schedule_work() should be fine with this too.
2605         */
2606        struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2607
2608        if (llist_add((struct llist_node *)addr, &p->list))
2609                schedule_work(&p->wq);
2610}
2611
2612/**
2613 * vfree_atomic - release memory allocated by vmalloc()
2614 * @addr:         memory base address
2615 *
2616 * This one is just like vfree() but can be called in any atomic context
2617 * except NMIs.
2618 */
2619void vfree_atomic(const void *addr)
2620{
2621        BUG_ON(in_nmi());
2622
2623        kmemleak_free(addr);
2624
2625        if (!addr)
2626                return;
2627        __vfree_deferred(addr);
2628}
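
/*
 * Illustrative sketch (not from this file): releasing a vmalloc()'ed
 * buffer while a spinlock is held, where plain vfree() must not be used
 * because it may sleep.  The lock and buffer pointer are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

static void example_drop_buffer_atomic(spinlock_t *lock, void **buf)
{
	spin_lock(lock);

	/* vfree_atomic() only queues the real work, so this is safe here. */
	vfree_atomic(*buf);
	*buf = NULL;

	spin_unlock(lock);
}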
2629
2630static void __vfree(const void *addr)
2631{
2632        if (unlikely(in_interrupt()))
2633                __vfree_deferred(addr);
2634        else
2635                __vunmap(addr, 1);
2636}
2637
2638/**
2639 * vfree - Release memory allocated by vmalloc()
2640 * @addr:  Memory base address
2641 *
2642 * Free the virtually contiguous memory area starting at @addr, as obtained
2643 * from one of the vmalloc() family of APIs.  This will usually also free the
2644 * physical memory underlying the virtual allocation, but that memory is
2645 * reference counted, so it will not be freed until the last user goes away.
2646 *
2647 * If @addr is NULL, no operation is performed.
2648 *
2649 * Context:
2650 * May sleep if called *not* from interrupt context.
2651 * Must not be called in NMI context (strictly speaking, it could be
2652 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2653 * conventions for vfree() arch-dependent would be a really bad idea).
2654 */
2655void vfree(const void *addr)
2656{
2657        BUG_ON(in_nmi());
2658
2659        kmemleak_free(addr);
2660
2661        might_sleep_if(!in_interrupt());
2662
2663        if (!addr)
2664                return;
2665
2666        __vfree(addr);
2667}
2668EXPORT_SYMBOL(vfree);
2669
2670/**
2671 * vunmap - release virtual mapping obtained by vmap()
2672 * @addr:   memory base address
2673 *
2674 * Free the virtually contiguous memory area starting at @addr,
2675 * which was created from the page array passed to vmap().
2676 *
2677 * Must not be called in interrupt context.
2678 */
2679void vunmap(const void *addr)
2680{
2681        BUG_ON(in_interrupt());
2682        might_sleep();
2683        if (addr)
2684                __vunmap(addr, 0);
2685}
2686EXPORT_SYMBOL(vunmap);
2687
2688/**
2689 * vmap - map an array of pages into virtually contiguous space
2690 * @pages: array of page pointers
2691 * @count: number of pages to map
2692 * @flags: vm_area->flags
2693 * @prot: page protection for the mapping
2694 *
2695 * Maps @count pages from @pages into contiguous kernel virtual space.
2696 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2697 * (which must be kmalloc or vmalloc memory) and one reference per page in it
2698 * are transferred from the caller to vmap(), and will be freed / dropped when
2699 * vfree() is called on the return value.
2700 *
2701 * Return: the address of the area or %NULL on failure
2702 */
2703void *vmap(struct page **pages, unsigned int count,
2704           unsigned long flags, pgprot_t prot)
2705{
2706        struct vm_struct *area;
2707        unsigned long addr;
2708        unsigned long size;             /* In bytes */
2709
2710        might_sleep();
2711
2712        if (count > totalram_pages())
2713                return NULL;
2714
2715        size = (unsigned long)count << PAGE_SHIFT;
2716        area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2717        if (!area)
2718                return NULL;
2719
2720        addr = (unsigned long)area->addr;
2721        if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2722                                pages, PAGE_SHIFT) < 0) {
2723                vunmap(area->addr);
2724                return NULL;
2725        }
2726
2727        if (flags & VM_MAP_PUT_PAGES) {
2728                area->pages = pages;
2729                area->nr_pages = count;
2730        }
2731        return area->addr;
2732}
2733EXPORT_SYMBOL(vmap);
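
/*
 * Illustrative sketch (not from this file): building a virtually
 * contiguous mapping over individually allocated pages.  With
 * VM_MAP_PUT_PAGES both the page references and the kmalloc'ed pages[]
 * array are handed over to vmalloc, so a later vfree() of the returned
 * address cleans up everything.
 */
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *example_vmap_alloc(unsigned int count)
{
	struct page **pages;
	unsigned int i;
	void *addr;

	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < count; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto err_pages;
	}

	addr = vmap(pages, count, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
	if (!addr)
		goto err_pages;

	return addr;

err_pages:
	while (i--)
		__free_page(pages[i]);
	kfree(pages);
	return NULL;
}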
2734
2735#ifdef CONFIG_VMAP_PFN
2736struct vmap_pfn_data {
2737        unsigned long   *pfns;
2738        pgprot_t        prot;
2739        unsigned int    idx;
2740};
2741
2742static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
2743{
2744        struct vmap_pfn_data *data = private;
2745
2746        if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
2747                return -EINVAL;
2748        *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
2749        return 0;
2750}
2751
2752/**
2753 * vmap_pfn - map an array of PFNs into virtually contiguous space
2754 * @pfns: array of PFNs
2755 * @count: number of pages to map
2756 * @prot: page protection for the mapping
2757 *
2758 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
2759 * the start address of the mapping.
2760 */
2761void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
2762{
2763        struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
2764        struct vm_struct *area;
2765
2766        area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
2767                        __builtin_return_address(0));
2768        if (!area)
2769                return NULL;
2770        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2771                        count * PAGE_SIZE, vmap_pfn_apply, &data)) {
2772                free_vm_area(area);
2773                return NULL;
2774        }
2775        return area->addr;
2776}
2777EXPORT_SYMBOL_GPL(vmap_pfn);
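
/*
 * Illustrative sketch (not from this file): mapping a run of device PFNs
 * (e.g. part of a PCI BAR with no struct page behind it) into kernel
 * virtual space.  The starting PFN and count are hypothetical, and the
 * caller is expected to vunmap() the returned address when done.
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *example_map_device_pfns(unsigned long first_pfn,
				     unsigned int count, pgprot_t prot)
{
	unsigned long *pfns;
	unsigned int i;
	void *addr;

	pfns = kcalloc(count, sizeof(*pfns), GFP_KERNEL);
	if (!pfns)
		return NULL;

	for (i = 0; i < count; i++)
		pfns[i] = first_pfn + i;

	addr = vmap_pfn(pfns, count, prot);

	/* The PTEs are written during vmap_pfn(); the array can go now. */
	kfree(pfns);
	return addr;
}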
2778#endif /* CONFIG_VMAP_PFN */
2779
2780static inline unsigned int
2781vm_area_alloc_pages(gfp_t gfp, int nid,
2782                unsigned int order, unsigned long nr_pages, struct page **pages)
2783{
2784        unsigned int nr_allocated = 0;
2785
2786        /*
2787         * For order-0 pages we make use of the bulk allocator. If
2788         * the page array is only partly populated (or not populated
2789         * at all) due to failures, fall back to the single page
2790         * allocator, which is more permissive.
2791         */
2792        if (!order)
2793                nr_allocated = alloc_pages_bulk_array_node(
2794                        gfp, nid, nr_pages, pages);
2795        else
2796                /*
2797                 * Compound pages are required for remap_vmalloc_page if
2798                 * high-order pages are used.
2799                 */
2800                gfp |= __GFP_COMP;
2801
2802        /* High-order pages or fallback path if "bulk" fails. */
2803        while (nr_allocated < nr_pages) {
2804                struct page *page;
2805                int i;
2806
2807                page = alloc_pages_node(nid, gfp, order);
2808                if (unlikely(!page))
2809                        break;
2810
2811                /*
2812                 * Careful, we allocate and map page-order pages, but
2813                 * tracking is done per PAGE_SIZE page so as to keep the
2814                 * vm_struct APIs independent of the physical/mapped size.
2815                 */
2816                for (i = 0; i < (1U << order); i++)
2817                        pages[nr_allocated + i] = page + i;
2818
2819                if (gfpflags_allow_blocking(gfp))
2820                        cond_resched();
2821
2822                nr_allocated += 1U << order;
2823        }
2824
2825        return nr_allocated;
2826}
2827
2828static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2829                                 pgprot_t prot, unsigned int page_shift,
2830                                 int node)
2831{
2832        const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2833        unsigned long addr = (unsigned long)area->addr;
2834        unsigned long size = get_vm_area_size(area);
2835        unsigned long array_size;
2836        unsigned int nr_small_pages = size >> PAGE_SHIFT;
2837        unsigned int page_order;
2838
2839        array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
2840        gfp_mask |= __GFP_NOWARN;
2841        if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
2842                gfp_mask |= __GFP_HIGHMEM;
2843
2844        /* Please note that the recursion is strictly bounded. */
2845        if (array_size > PAGE_SIZE) {
2846                area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
2847                                        area->caller);
2848        } else {
2849                area->pages = kmalloc_node(array_size, nested_gfp, node);
2850        }
2851
2852        if (!area->pages) {
2853                warn_alloc(gfp_mask, NULL,
2854                        "vmalloc error: size %lu, failed to allocate page array size %lu",
2855                        nr_small_pages * PAGE_SIZE, array_size);
2856                free_vm_area(area);
2857                return NULL;
2858        }
2859
2860        set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
2861        page_order = vm_area_page_order(area);
2862
2863        area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
2864                page_order, nr_small_pages, area->pages);
2865
2866        atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2867
2868        /*
2869         * If not enough pages were obtained to accomplish the
2870         * allocation request, free whatever was allocated via __vfree().
2871         */
2872        if (area->nr_pages != nr_small_pages) {
2873                warn_alloc(gfp_mask, NULL,
2874                        "vmalloc error: size %lu, page order %u, failed to allocate pages",
2875                        area->nr_pages * PAGE_SIZE, page_order);
2876                goto fail;
2877        }
2878
2879        if (vmap_pages_range(addr, addr + size, prot, area->pages,
2880                        page_shift) < 0) {
2881                warn_alloc(gfp_mask, NULL,
2882                        "vmalloc error: size %lu, failed to map pages",
2883                        area->nr_pages * PAGE_SIZE);
2884                goto fail;
2885        }
2886
2887        return area->addr;
2888
2889fail:
2890        __vfree(area->addr);
2891        return NULL;
2892}
2893
2894/**
2895 * __vmalloc_node_range - allocate virtually contiguous memory
2896 * @size:                 allocation size
2897 * @align:                desired alignment
2898 * @start:                vm area range start
2899 * @end:                  vm area range end
2900 * @gfp_mask:             flags for the page level allocator
2901 * @prot:                 protection mask for the allocated pages
2902 * @vm_flags:             additional vm area flags (e.g. %VM_NO_GUARD)
2903 * @node:                 node to use for allocation or NUMA_NO_NODE
2904 * @caller:               caller's return address
2905 *
2906 * Allocate enough pages to cover @size from the page level
2907 * allocator with @gfp_mask flags.  Map them into contiguous
2908 * kernel virtual space, using a pagetable protection of @prot.
2909 *
2910 * Return: the address of the area or %NULL on failure
2911 */
2912void *__vmalloc_node_range(unsigned long size, unsigned long align,
2913                        unsigned long start, unsigned long end, gfp_t gfp_mask,
2914                        pgprot_t prot, unsigned long vm_flags, int node,
2915                        const void *caller)
2916{
2917        struct vm_struct *area;
2918        void *addr;
2919        unsigned long real_size = size;
2920        unsigned long real_align = align;
2921        unsigned int shift = PAGE_SHIFT;
2922
2923        if (WARN_ON_ONCE(!size))
2924                return NULL;
2925
2926        if ((size >> PAGE_SHIFT) > totalram_pages()) {
2927                warn_alloc(gfp_mask, NULL,
2928                        "vmalloc error: size %lu, exceeds total pages",
2929                        real_size);
2930                return NULL;
2931        }
2932
2933        if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
2934                unsigned long size_per_node;
2935
2936                /*
2937                 * Try huge pages. Only try for PAGE_KERNEL allocations;
2938                 * others, like modules, don't yet expect huge pages in
2939                 * their allocations because apply_to_page_range() does
2940                 * not support them.
2941                 */
2942
2943                size_per_node = size;
2944                if (node == NUMA_NO_NODE)
2945                        size_per_node /= num_online_nodes();
2946                if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
2947                        shift = PMD_SHIFT;
2948                else
2949                        shift = arch_vmap_pte_supported_shift(size_per_node);
2950
2951                align = max(real_align, 1UL << shift);
2952                size = ALIGN(real_size, 1UL << shift);
2953        }
2954
2955again:
2956        area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
2957                                  VM_UNINITIALIZED | vm_flags, start, end, node,
2958                                  gfp_mask, caller);
2959        if (!area) {
2960                warn_alloc(gfp_mask, NULL,
2961                        "vmalloc error: size %lu, vm_struct allocation failed",
2962                        real_size);
2963                goto fail;
2964        }
2965
2966        addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
2967        if (!addr)
2968                goto fail;
2969
2970        /*
2971         * In this function, newly allocated vm_struct has VM_UNINITIALIZED
2972         * flag. It means that vm_struct is not fully initialized.
2973         * Now, it is fully initialized, so remove this flag here.
2974         */
2975        clear_vm_uninitialized_flag(area);
2976
2977        size = PAGE_ALIGN(size);
2978        kmemleak_vmalloc(area, size, gfp_mask);
2979
2980        return addr;
2981
2982fail:
2983        if (shift > PAGE_SHIFT) {
2984                shift = PAGE_SHIFT;
2985                align = real_align;
2986                size = real_size;
2987                goto again;
2988        }
2989
2990        return NULL;
2991}
2992
2993/**
2994 * __vmalloc_node - allocate virtually contiguous memory
2995 * @size:           allocation size
2996 * @align:          desired alignment
2997 * @gfp_mask:       flags for the page level allocator
2998 * @node:           node to use for allocation or NUMA_NO_NODE
2999 * @caller:         caller's return address
3000 *
3001 * Allocate enough pages to cover @size from the page level allocator with
3002 * @gfp_mask flags.  Map them into contiguous kernel virtual space.
3003 *
3004 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3005 * and __GFP_NOFAIL are not supported.
3006 *
3007 * Any use of gfp flags outside of GFP_KERNEL should be discussed
3008 * with the mm people.
3009 *
3010 * Return: pointer to the allocated memory or %NULL on error
3011 */
3012void *__vmalloc_node(unsigned long size, unsigned long align,
3013                            gfp_t gfp_mask, int node, const void *caller)
3014{
3015        return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
3016                                gfp_mask, PAGE_KERNEL, 0, node, caller);
3017}
3018/*
3019 * This is only for performance analysis of vmalloc and for stress testing.
3020 * It is required by the vmalloc test module; therefore, do not use it for
3021 * anything else.
3022 */
3023#ifdef CONFIG_TEST_VMALLOC_MODULE
3024EXPORT_SYMBOL_GPL(__vmalloc_node);
3025#endif
3026
3027void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3028{
3029        return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
3030                                __builtin_return_address(0));
3031}
3032EXPORT_SYMBOL(__vmalloc);
3033
3034/**
3035 * vmalloc - allocate virtually contiguous memory
3036 * @size:    allocation size
3037 *
3038 * Allocate enough pages to cover @size from the page level
3039 * allocator and map them into contiguous kernel virtual space.
3040 *
3041 * For tight control over page level allocator and protection flags
3042 * use __vmalloc() instead.
3043 *
3044 * Return: pointer to the allocated memory or %NULL on error
3045 */
3046void *vmalloc(unsigned long size)
3047{
3048        return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3049                                __builtin_return_address(0));
3050}
3051EXPORT_SYMBOL(vmalloc);
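
/*
 * Illustrative sketch (not from this file): the common pattern for a
 * large, virtually contiguous table that does not need to be physically
 * contiguous.  The structure and function names are hypothetical.
 */
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct example_big_table {
	unsigned long nr_entries;
	u64 entries[];
};

static struct example_big_table *example_alloc_table(unsigned long nr)
{
	struct example_big_table *t;

	t = vmalloc(struct_size(t, entries, nr));
	if (!t)
		return NULL;

	t->nr_entries = nr;
	return t;
}

static void example_free_table(struct example_big_table *t)
{
	vfree(t);	/* vfree(NULL) is a no-op */
}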
3052
3053/**
3054 * vmalloc_no_huge - allocate virtually contiguous memory using small pages
3055 * @size:    allocation size
3056 *
3057 * Allocate enough non-huge pages to cover @size from the page level
3058 * allocator and map them into contiguous kernel virtual space.
3059 *
3060 * Return: pointer to the allocated memory or %NULL on error
3061 */
3062void *vmalloc_no_huge(unsigned long size)
3063{
3064        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3065                                    GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
3066                                    NUMA_NO_NODE, __builtin_return_address(0));
3067}
3068EXPORT_SYMBOL(vmalloc_no_huge);
3069
3070/**
3071 * vzalloc - allocate virtually contiguous memory with zero fill
3072 * @size:    allocation size
3073 *
3074 * Allocate enough pages to cover @size from the page level
3075 * allocator and map them into contiguous kernel virtual space.
3076 * The memory allocated is set to zero.
3077 *
3078 * For tight control over page level allocator and protection flags
3079 * use __vmalloc() instead.
3080 *
3081 * Return: pointer to the allocated memory or %NULL on error
3082 */
3083void *vzalloc(unsigned long size)
3084{
3085        return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3086                                __builtin_return_address(0));
3087}
3088EXPORT_SYMBOL(vzalloc);
3089
3090/**
3091 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3092 * @size: allocation size
3093 *
3094 * The resulting memory area is zeroed so it can be mapped to userspace
3095 * without leaking data.
3096 *
3097 * Return: pointer to the allocated memory or %NULL on error
3098 */
3099void *vmalloc_user(unsigned long size)
3100{
3101        return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3102                                    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3103                                    VM_USERMAP, NUMA_NO_NODE,
3104                                    __builtin_return_address(0));
3105}
3106EXPORT_SYMBOL(vmalloc_user);
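
/*
 * Illustrative sketch (not from this file): backing a character device's
 * mmap() with a buffer obtained from vmalloc_user().  The private_data
 * convention is hypothetical; remap_vmalloc_range() insists on the
 * VM_USERMAP flag that vmalloc_user() sets.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	void *buf = file->private_data;	/* assumed: a vmalloc_user() buffer */

	if (!buf)
		return -ENODEV;

	return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
}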
3107
3108/**
3109 * vmalloc_node - allocate memory on a specific node
3110 * @size:         allocation size
3111 * @node:         numa node
3112 *
3113 * Allocate enough pages to cover @size from the page level
3114 * allocator and map them into contiguous kernel virtual space.
3115 *
3116 * For tight control over page level allocator and protection flags
3117 * use __vmalloc() instead.
3118 *
3119 * Return: pointer to the allocated memory or %NULL on error
3120 */
3121void *vmalloc_node(unsigned long size, int node)
3122{
3123        return __vmalloc_node(size, 1, GFP_KERNEL, node,
3124                        __builtin_return_address(0));
3125}
3126EXPORT_SYMBOL(vmalloc_node);
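
/*
 * Illustrative sketch, not part of vmalloc.c: allocating one buffer per online
 * NUMA node so that each node's consumers work on node-local memory where
 * possible. example_node_buf[] and NODE_BUF_SIZE are assumptions made for this
 * example; a real caller would also unwind with vfree() on failure.
 */
#define NODE_BUF_SIZE   (4UL << 20)

static void *example_node_buf[MAX_NUMNODES];

static int example_alloc_per_node(void)
{
        int node;

        for_each_online_node(node) {
                example_node_buf[node] = vmalloc_node(NODE_BUF_SIZE, node);
                if (!example_node_buf[node])
                        return -ENOMEM;
        }

        return 0;
}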
3127
3128/**
3129 * vzalloc_node - allocate memory on a specific node with zero fill
3130 * @size:       allocation size
3131 * @node:       numa node
3132 *
3133 * Allocate enough pages to cover @size from the page level
3134 * allocator and map them into contiguous kernel virtual space.
3135 * The memory allocated is set to zero.
3136 *
3137 * Return: pointer to the allocated memory or %NULL on error
3138 */
3139void *vzalloc_node(unsigned long size, int node)
3140{
3141        return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
3142                                __builtin_return_address(0));
3143}
3144EXPORT_SYMBOL(vzalloc_node);
3145
3146#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3147#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3148#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3149#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
3150#else
3151/*
3152 * 64-bit systems should always have either a DMA or a DMA32 zone. For others,
3153 * GFP_DMA32 should do the right thing and use the normal zone.
3154 */
3155#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3156#endif
3157
3158/**
3159 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3160 * @size:       allocation size
3161 *
3162 * Allocate enough 32-bit physically addressable pages to cover @size from the
3163 * page level allocator and map them into contiguous kernel virtual space.
3164 *
3165 * Return: pointer to the allocated memory or %NULL on error
3166 */
3167void *vmalloc_32(unsigned long size)
3168{
3169        return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3170                        __builtin_return_address(0));
3171}
3172EXPORT_SYMBOL(vmalloc_32);
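
/*
 * Illustrative sketch, not part of vmalloc.c: a buffer whose backing pages
 * must be reachable through 32-bit physical addresses, e.g. for a device that
 * can only address memory below 4 GiB. The area is still only virtually
 * contiguous, so such a device would be driven page by page (typically via
 * scatter-gather DMA). FRAME_BUF_SIZE is an assumption for this example.
 */
#define FRAME_BUF_SIZE  (2UL << 20)

static void *example_alloc_frame_buffer(void)
{
        /* Every backing page has a 32-bit addressable physical address. */
        return vmalloc_32(FRAME_BUF_SIZE);
}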
3173
3174/**
3175 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
3176 * @size:            allocation size
3177 *
3178 * The resulting memory area is 32bit addressable and zeroed so it can be
3179 * mapped to userspace without leaking data.
3180 *
3181 * Return: pointer to the allocated memory or %NULL on error
3182 */
3183void *vmalloc_32_user(unsigned long size)
3184{
3185        return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3186                                    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3187                                    VM_USERMAP, NUMA_NO_NODE,
3188                                    __builtin_return_address(0));
3189}
3190EXPORT_SYMBOL(vmalloc_32_user);
3191
3192/*
3193 * Small helper routine: copy contents from @addr to @buf.
3194 * If a page is not present, the corresponding bytes are zero-filled.
3195 */
3196
3197static int aligned_vread(char *buf, char *addr, unsigned long count)
3198{
3199        struct page *p;
3200        int copied = 0;
3201
3202        while (count) {
3203                unsigned long offset, length;
3204
3205                offset = offset_in_page(addr);
3206                length = PAGE_SIZE - offset;
3207                if (length > count)
3208                        length = count;
3209                p = vmalloc_to_page(addr);
3210                /*
3211                 * Safe access to this _mapped_ area would require a
3212                 * lock, but taking one here would add overhead to every
3213                 * vmalloc()/vfree() call just for this rarely used _debug_
3214                 * interface. Instead, we use kmap_atomic() and accept a
3215                 * small overhead in this access function.
3216                 */
3217                if (p) {
3218                        /* We can expect USER0 is not used -- see vread() */
3219                        void *map = kmap_atomic(p);
3220                        memcpy(buf, map + offset, length);
3221                        kunmap_atomic(map);
3222                } else
3223                        memset(buf, 0, length);
3224
3225                addr += length;
3226                buf += length;
3227                copied += length;
3228                count -= length;
3229        }
3230        return copied;
3231}
3232
3233/**
3234 * vread() - read vmalloc area in a safe way.
3235 * @buf:     buffer for reading data
3236 * @addr:    vm address.
3237 * @count:   number of bytes to be read.
3238 *
3239 * This function checks that addr is a valid vmalloc'ed area, and
3240 * copies data from that area to a given buffer. If the given memory range
3241 * of [addr...addr+count) includes some valid address, data is copied to
3242 * the proper area of @buf. If there are memory holes, they'll be zero-filled.
3243 * An IOREMAP area is treated as a memory hole and no copy is done.
3244 *
3245 * If [addr...addr+count) does not intersect any live
3246 * vm_struct area, 0 is returned. @buf should be a kernel buffer.
3247 *
3248 * Note: In normal operation vread() is never necessary, because the caller
3249 * knows the vmalloc() area is valid and can use memcpy() directly.
3250 * This helper is for routines that must access a vmalloc area without
3251 * any prior information about it, such as /proc/kcore.
3252 *
3253 * Return: number of bytes for which addr and buf should be increased
3254 * (same number as @count) or %0 if [addr...addr+count) does not
3255 * intersect any valid vmalloc area
3256 */
3257long vread(char *buf, char *addr, unsigned long count)
3258{
3259        struct vmap_area *va;
3260        struct vm_struct *vm;
3261        char *vaddr, *buf_start = buf;
3262        unsigned long buflen = count;
3263        unsigned long n;
3264
3265        /* Don't allow overflow */
3266        if ((unsigned long) addr + count < count)
3267                count = -(unsigned long) addr;
3268
3269        spin_lock(&vmap_area_lock);
3270        va = __find_vmap_area((unsigned long)addr);
3271        if (!va)
3272                goto finished;
3273        list_for_each_entry_from(va, &vmap_area_list, list) {
3274                if (!count)
3275                        break;
3276
3277                if (!va->vm)
3278                        continue;
3279
3280                vm = va->vm;
3281                vaddr = (char *) vm->addr;
3282                if (addr >= vaddr + get_vm_area_size(vm))
3283                        continue;
3284                while (addr < vaddr) {
3285                        if (count == 0)
3286                                goto finished;
3287                        *buf = '\0';
3288                        buf++;
3289                        addr++;
3290                        count--;
3291                }
3292                n = vaddr + get_vm_area_size(vm) - addr;
3293                if (n > count)
3294                        n = count;
3295                if (!(vm->flags & VM_IOREMAP))
3296                        aligned_vread(buf, addr, n);
3297                else /* IOREMAP area is treated as memory hole */
3298                        memset(buf, 0, n);
3299                buf += n;
3300                addr += n;
3301                count -= n;
3302        }
3303finished:
3304        spin_unlock(&vmap_area_lock);
3305
3306        if (buf == buf_start)
3307                return 0;
3308        /* zero-fill memory holes */
3309        if (buf != buf_start + buflen)
3310                memset(buf, 0, buflen - (buf - buf_start));
3311
3312        return buflen;
3313}
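
/*
 * Illustrative sketch, not part of vmalloc.c: reading a possibly vmalloc'ed
 * address range the way /proc/kcore does, without knowing whether every page
 * in it is actually mapped. Holes come back zero-filled; a return value of 0
 * means the range does not intersect any vmalloc area at all.
 * example_dump_vmalloc_range() is a hypothetical helper.
 */
static long example_dump_vmalloc_range(char *kbuf, void *start, unsigned long len)
{
        long copied;

        copied = vread(kbuf, (char *)start, len);
        if (!copied)
                memset(kbuf, 0, len);   /* no vmalloc area intersects [start, start+len) */

        return copied;
}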
3314
3315/**
3316 * remap_vmalloc_range_partial - map vmalloc pages to userspace
3317 * @vma:                vma to cover
3318 * @uaddr:              target user address to start at
3319 * @kaddr:              virtual address of vmalloc kernel memory
3320 * @pgoff:              offset from @kaddr to start at
3321 * @size:               size of map area
3322 *
3323 * Returns:     0 for success, -Exxx on failure
3324 *
3325 * This function checks that @kaddr is a valid vmalloc'ed area,
3326 * and that it is big enough to cover the range starting at
3327 * @uaddr in @vma. Failure is returned if those criteria aren't
3328 * met.
3329 *
3330 * Similar to remap_pfn_range() (see mm/memory.c)
3331 */
3332int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3333                                void *kaddr, unsigned long pgoff,
3334                                unsigned long size)
3335{
3336        struct vm_struct *area;
3337        unsigned long off;
3338        unsigned long end_index;
3339
3340        if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3341                return -EINVAL;
3342
3343        size = PAGE_ALIGN(size);
3344
3345        if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3346                return -EINVAL;
3347
3348        area = find_vm_area(kaddr);
3349        if (!area)
3350                return -EINVAL;
3351
3352        if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3353                return -EINVAL;
3354
3355        if (check_add_overflow(size, off, &end_index) ||
3356            end_index > get_vm_area_size(area))
3357                return -EINVAL;
3358        kaddr += off;
3359
3360        do {
3361                struct page *page = vmalloc_to_page(kaddr);
3362                int ret;
3363
3364                ret = vm_insert_page(vma, uaddr, page);
3365                if (ret)
3366                        return ret;
3367
3368                uaddr += PAGE_SIZE;
3369                kaddr += PAGE_SIZE;
3370                size -= PAGE_SIZE;
3371        } while (size > 0);
3372
3373        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3374
3375        return 0;
3376}
3377
3378/**
3379 * remap_vmalloc_range - map vmalloc pages to userspace
3380 * @vma:                vma to cover (map full range of vma)
3381 * @addr:               vmalloc memory
3382 * @pgoff:              number of pages into addr before first page to map
3383 *
3384 * Returns:     0 for success, -Exxx on failure
3385 *
3386 * This function checks that addr is a valid vmalloc'ed area, and
3387 * that it is big enough to cover the vma. Failure is returned if
3388 * those criteria aren't met.
3389 *
3390 * Similar to remap_pfn_range() (see mm/memory.c)
3391 */
3392int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3393                                                unsigned long pgoff)
3394{
3395        return remap_vmalloc_range_partial(vma, vma->vm_start,
3396                                           addr, pgoff,
3397                                           vma->vm_end - vma->vm_start);
3398}
3399EXPORT_SYMBOL(remap_vmalloc_range);
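
/*
 * Illustrative sketch, not part of vmalloc.c: a driver ->mmap() handler that
 * exports a buffer previously allocated with vmalloc_user(), so VM_USERMAP is
 * set as remap_vmalloc_range_partial() requires. "struct example_dev" and its
 * shared_buf field are the assumptions introduced in the vmalloc_user() sketch
 * above.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct example_dev *dev = file->private_data;

        /* Covers the whole VMA, starting vm_pgoff pages into the buffer. */
        return remap_vmalloc_range(vma, dev->shared_buf, vma->vm_pgoff);
}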
3400
3401void free_vm_area(struct vm_struct *area)
3402{
3403        struct vm_struct *ret;
3404        ret = remove_vm_area(area->addr);
3405        BUG_ON(ret != area);
3406        kfree(area);
3407}
3408EXPORT_SYMBOL_GPL(free_vm_area);
3409
3410#ifdef CONFIG_SMP
3411static struct vmap_area *node_to_va(struct rb_node *n)
3412{
3413        return rb_entry_safe(n, struct vmap_area, rb_node);
3414}
3415
3416/**
3417 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3418 * @addr: target address
3419 *
3420 * Returns: the vmap_area that contains @addr if one is found. Otherwise
3421 *   the highest vmap_area below @addr (walking in reverse order) is
3422 *   returned, i.e. one with va->va_start < addr && va->va_end < addr,
3423 *   or NULL if there is no area at all before @addr.
3424 */
3425static struct vmap_area *
3426pvm_find_va_enclose_addr(unsigned long addr)
3427{
3428        struct vmap_area *va, *tmp;
3429        struct rb_node *n;
3430
3431        n = free_vmap_area_root.rb_node;
3432        va = NULL;
3433
3434        while (n) {
3435                tmp = rb_entry(n, struct vmap_area, rb_node);
3436                if (tmp->va_start <= addr) {
3437                        va = tmp;
3438                        if (tmp->va_end >= addr)
3439                                break;
3440
3441                        n = n->rb_right;
3442                } else {
3443                        n = n->rb_left;
3444                }
3445        }
3446
3447        return va;
3448}
3449
3450/**
3451 * pvm_determine_end_from_reverse - find the highest aligned address
3452 * of a free block below VMALLOC_END
3453 * @va:
3454 *   in - the VA from which the search starts (in reverse order);
3455 *   out - the VA with the highest aligned end address.
3456 * @align: alignment for required highest address
3457 *
3458 * Returns: determined end address within vmap_area
3459 */
3460static unsigned long
3461pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3462{
3463        unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3464        unsigned long addr;
3465
3466        if (likely(*va)) {
3467                list_for_each_entry_from_reverse((*va),
3468                                &free_vmap_area_list, list) {
3469                        addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3470                        if ((*va)->va_start < addr)
3471                                return addr;
3472                }
3473        }
3474
3475        return 0;
3476}
3477
3478/**
3479 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3480 * @offsets: array containing offset of each area
3481 * @sizes: array containing size of each area
3482 * @nr_vms: the number of areas to allocate
3483 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3484 *
3485 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3486 *          vm_structs on success, %NULL on failure
3487 *
3488 * Percpu allocator wants to use congruent vm areas so that it can
3489 * maintain the offsets among percpu areas.  This function allocates
3490 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
3491 * be scattered pretty far, distance between two areas easily going up
3492 * to gigabytes.  To avoid interacting with regular vmallocs, these
3493 * areas are allocated from top.
3494 *
3495 * Despite its complicated look, this allocator is rather simple. It
3496 * does everything top-down and scans free blocks from the end looking
3497 * for a matching base. While scanning, if any of the areas does not fit,
3498 * the base address is pulled down to fit that area. Scanning is repeated until
3499 * all the areas fit and then all necessary data structures are inserted
3500 * and the result is returned.
3501 */
3502struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3503                                     const size_t *sizes, int nr_vms,
3504                                     size_t align)
3505{
3506        const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3507        const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3508        struct vmap_area **vas, *va;
3509        struct vm_struct **vms;
3510        int area, area2, last_area, term_area;
3511        unsigned long base, start, size, end, last_end, orig_start, orig_end;
3512        bool purged = false;
3513        enum fit_type type;
3514
3515        /* verify parameters and allocate data structures */
3516        BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3517        for (last_area = 0, area = 0; area < nr_vms; area++) {
3518                start = offsets[area];
3519                end = start + sizes[area];
3520
3521                /* is everything aligned properly? */
3522                BUG_ON(!IS_ALIGNED(offsets[area], align));
3523                BUG_ON(!IS_ALIGNED(sizes[area], align));
3524
3525                /* detect the area with the highest address */
3526                if (start > offsets[last_area])
3527                        last_area = area;
3528
3529                for (area2 = area + 1; area2 < nr_vms; area2++) {
3530                        unsigned long start2 = offsets[area2];
3531                        unsigned long end2 = start2 + sizes[area2];
3532
3533                        BUG_ON(start2 < end && start < end2);
3534                }
3535        }
3536        last_end = offsets[last_area] + sizes[last_area];
3537
3538        if (vmalloc_end - vmalloc_start < last_end) {
3539                WARN_ON(true);
3540                return NULL;
3541        }
3542
3543        vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3544        vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3545        if (!vas || !vms)
3546                goto err_free2;
3547
3548        for (area = 0; area < nr_vms; area++) {
3549                vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3550                vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3551                if (!vas[area] || !vms[area])
3552                        goto err_free;
3553        }
3554retry:
3555        spin_lock(&free_vmap_area_lock);
3556
3557        /* start scanning - we scan from the top, begin with the last area */
3558        area = term_area = last_area;
3559        start = offsets[area];
3560        end = start + sizes[area];
3561
3562        va = pvm_find_va_enclose_addr(vmalloc_end);
3563        base = pvm_determine_end_from_reverse(&va, align) - end;
3564
3565        while (true) {
3566                /*
3567                 * base might have underflowed, add last_end before
3568                 * comparing.
3569                 */
3570                if (base + last_end < vmalloc_start + last_end)
3571                        goto overflow;
3572
3573                /*
3574                 * Fitting base has not been found.
3575                 */
3576                if (va == NULL)
3577                        goto overflow;
3578
3579                /*
3580                 * If required width exceeds current VA block, move
3581                 * base downwards and then recheck.
3582                 */
3583                if (base + end > va->va_end) {
3584                        base = pvm_determine_end_from_reverse(&va, align) - end;
3585                        term_area = area;
3586                        continue;
3587                }
3588
3589                /*
3590                 * If this VA does not fit, move base downwards and recheck.
3591                 */
3592                if (base + start < va->va_start) {
3593                        va = node_to_va(rb_prev(&va->rb_node));
3594                        base = pvm_determine_end_from_reverse(&va, align) - end;
3595                        term_area = area;
3596                        continue;
3597                }
3598
3599                /*
3600                 * This area fits, move on to the previous one.  If
3601                 * the previous one is the terminal one, we're done.
3602                 */
3603                area = (area + nr_vms - 1) % nr_vms;
3604                if (area == term_area)
3605                        break;
3606
3607                start = offsets[area];
3608                end = start + sizes[area];
3609                va = pvm_find_va_enclose_addr(base + end);
3610        }
3611
3612        /* we've found a fitting base, insert all va's */
3613        for (area = 0; area < nr_vms; area++) {
3614                int ret;
3615
3616                start = base + offsets[area];
3617                size = sizes[area];
3618
3619                va = pvm_find_va_enclose_addr(start);
3620                if (WARN_ON_ONCE(va == NULL))
3621                        /* It is a BUG(), but trigger recovery instead. */
3622                        goto recovery;
3623
3624                type = classify_va_fit_type(va, start, size);
3625                if (WARN_ON_ONCE(type == NOTHING_FIT))
3626                        /* It is a BUG(), but trigger recovery instead. */
3627                        goto recovery;
3628
3629                ret = adjust_va_to_fit_type(va, start, size, type);
3630                if (unlikely(ret))
3631                        goto recovery;
3632
3633                /* Allocated area. */
3634                va = vas[area];
3635                va->va_start = start;
3636                va->va_end = start + size;
3637        }
3638
3639        spin_unlock(&free_vmap_area_lock);
3640
3641        /* populate the kasan shadow space */
3642        for (area = 0; area < nr_vms; area++) {
3643                if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3644                        goto err_free_shadow;
3645
3646                kasan_unpoison_vmalloc((void *)vas[area]->va_start,
3647                                       sizes[area]);
3648        }
3649
3650        /* insert all vm's */
3651        spin_lock(&vmap_area_lock);
3652        for (area = 0; area < nr_vms; area++) {
3653                insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3654
3655                setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3656                                 pcpu_get_vm_areas);
3657        }
3658        spin_unlock(&vmap_area_lock);
3659
3660        kfree(vas);
3661        return vms;
3662
3663recovery:
3664        /*
3665         * Remove previously allocated areas. There is no
3666         * need to remove these areas from the busy tree,
3667         * because they are inserted only on the final step
3668         * and only when pcpu_get_vm_areas() succeeds.
3669         */
3670        while (area--) {
3671                orig_start = vas[area]->va_start;
3672                orig_end = vas[area]->va_end;
3673                va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3674                                &free_vmap_area_list);
3675                if (va)
3676                        kasan_release_vmalloc(orig_start, orig_end,
3677                                va->va_start, va->va_end);
3678                vas[area] = NULL;
3679        }
3680
3681overflow:
3682        spin_unlock(&free_vmap_area_lock);
3683        if (!purged) {
3684                purge_vmap_area_lazy();
3685                purged = true;
3686
3687                /* Before "retry", check if we recover. */
3688                for (area = 0; area < nr_vms; area++) {
3689                        if (vas[area])
3690                                continue;
3691
3692                        vas[area] = kmem_cache_zalloc(
3693                                vmap_area_cachep, GFP_KERNEL);
3694                        if (!vas[area])
3695                                goto err_free;
3696                }
3697
3698                goto retry;
3699        }
3700
3701err_free:
3702        for (area = 0; area < nr_vms; area++) {
3703                if (vas[area])
3704                        kmem_cache_free(vmap_area_cachep, vas[area]);
3705
3706                kfree(vms[area]);
3707        }
3708err_free2:
3709        kfree(vas);
3710        kfree(vms);
3711        return NULL;
3712
3713err_free_shadow:
3714        spin_lock(&free_vmap_area_lock);
3715        /*
3716         * We release all the vmalloc shadows, even the ones for regions that
3717         * hadn't been successfully added. This relies on kasan_release_vmalloc
3718         * being able to tolerate this case.
3719         */
3720        for (area = 0; area < nr_vms; area++) {
3721                orig_start = vas[area]->va_start;
3722                orig_end = vas[area]->va_end;
3723                va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3724                                &free_vmap_area_list);
3725                if (va)
3726                        kasan_release_vmalloc(orig_start, orig_end,
3727                                va->va_start, va->va_end);
3728                vas[area] = NULL;
3729                kfree(vms[area]);
3730        }
3731        spin_unlock(&free_vmap_area_lock);
3732        kfree(vas);
3733        kfree(vms);
3734        return NULL;
3735}
3736
3737/**
3738 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3739 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3740 * @nr_vms: the number of allocated areas
3741 *
3742 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3743 */
3744void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3745{
3746        int i;
3747
3748        for (i = 0; i < nr_vms; i++)
3749                free_vm_area(vms[i]);
3750        kfree(vms);
3751}
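
/*
 * Illustrative sketch, not part of vmalloc.c: how a caller such as the percpu
 * allocator might request two congruent areas and release them again. The
 * offsets, sizes and PMD_SIZE alignment are arbitrary assumptions; the real
 * caller derives them from the percpu chunk layout.
 */
static int example_reserve_congruent_areas(void)
{
        const unsigned long offsets[] = { 0, 2 * PMD_SIZE };
        const size_t sizes[] = { PMD_SIZE, PMD_SIZE };
        struct vm_struct **vms;

        /* All offsets and sizes must be multiples of the requested alignment. */
        vms = pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(sizes), PMD_SIZE);
        if (!vms)
                return -ENOMEM;

        /* vms[0]->addr and vms[1]->addr now differ by exactly 2 * PMD_SIZE. */

        pcpu_free_vm_areas(vms, ARRAY_SIZE(sizes));
        return 0;
}
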
3752#endif  /* CONFIG_SMP */
3753
3754#ifdef CONFIG_PRINTK
3755bool vmalloc_dump_obj(void *object)
3756{
3757        struct vm_struct *vm;
3758        void *objp = (void *)PAGE_ALIGN((unsigned long)object);
3759
3760        vm = find_vm_area(objp);
3761        if (!vm)
3762                return false;
3763        pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
3764                vm->nr_pages, (unsigned long)vm->addr, vm->caller);
3765        return true;
3766}
3767#endif
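
/*
 * Illustrative sketch, not part of vmalloc.c: a debugging aid that reports
 * where a suspect pointer's vmalloc region was allocated, falling back to a
 * plain message when the pointer is not a vmalloc address at all.
 * example_report_pointer() is a hypothetical helper; vmalloc_dump_obj()
 * continues the line started here via pr_cont().
 */
static void example_report_pointer(void *ptr)
{
        pr_info("suspect pointer %px:", ptr);
        if (!vmalloc_dump_obj(ptr))
                pr_cont(" not a vmalloc address\n");
}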
3768
3769#ifdef CONFIG_PROC_FS
3770static void *s_start(struct seq_file *m, loff_t *pos)
3771        __acquires(&vmap_purge_lock)
3772        __acquires(&vmap_area_lock)
3773{
3774        mutex_lock(&vmap_purge_lock);
3775        spin_lock(&vmap_area_lock);
3776
3777        return seq_list_start(&vmap_area_list, *pos);
3778}
3779
3780static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3781{
3782        return seq_list_next(p, &vmap_area_list, pos);
3783}
3784
3785static void s_stop(struct seq_file *m, void *p)
3786        __releases(&vmap_area_lock)
3787        __releases(&vmap_purge_lock)
3788{
3789        spin_unlock(&vmap_area_lock);
3790        mutex_unlock(&vmap_purge_lock);
3791}
3792
3793static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3794{
3795        if (IS_ENABLED(CONFIG_NUMA)) {
3796                unsigned int nr, *counters = m->private;
3797
3798                if (!counters)
3799                        return;
3800
3801                if (v->flags & VM_UNINITIALIZED)
3802                        return;
3803                /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3804                smp_rmb();
3805
3806                memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3807
3808                for (nr = 0; nr < v->nr_pages; nr++)
3809                        counters[page_to_nid(v->pages[nr])]++;
3810
3811                for_each_node_state(nr, N_HIGH_MEMORY)
3812                        if (counters[nr])
3813                                seq_printf(m, " N%u=%u", nr, counters[nr]);
3814        }
3815}
3816
3817static void show_purge_info(struct seq_file *m)
3818{
3819        struct vmap_area *va;
3820
3821        spin_lock(&purge_vmap_area_lock);
3822        list_for_each_entry(va, &purge_vmap_area_list, list) {
3823                seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3824                        (void *)va->va_start, (void *)va->va_end,
3825                        va->va_end - va->va_start);
3826        }
3827        spin_unlock(&purge_vmap_area_lock);
3828}
3829
3830static int s_show(struct seq_file *m, void *p)
3831{
3832        struct vmap_area *va;
3833        struct vm_struct *v;
3834
3835        va = list_entry(p, struct vmap_area, list);
3836
3837        /*
3838         * s_show can race with remove_vm_area(): !vm means the vmap area
3839         * is being torn down or belongs to a vm_map_ram allocation.
3840         */
3841        if (!va->vm) {
3842                seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
3843                        (void *)va->va_start, (void *)va->va_end,
3844                        va->va_end - va->va_start);
3845
3846                return 0;
3847        }
3848
3849        v = va->vm;
3850
3851        seq_printf(m, "0x%pK-0x%pK %7ld",
3852                v->addr, v->addr + v->size, v->size);
3853
3854        if (v->caller)
3855                seq_printf(m, " %pS", v->caller);
3856
3857        if (v->nr_pages)
3858                seq_printf(m, " pages=%d", v->nr_pages);
3859
3860        if (v->phys_addr)
3861                seq_printf(m, " phys=%pa", &v->phys_addr);
3862
3863        if (v->flags & VM_IOREMAP)
3864                seq_puts(m, " ioremap");
3865
3866        if (v->flags & VM_ALLOC)
3867                seq_puts(m, " vmalloc");
3868
3869        if (v->flags & VM_MAP)
3870                seq_puts(m, " vmap");
3871
3872        if (v->flags & VM_USERMAP)
3873                seq_puts(m, " user");
3874
3875        if (v->flags & VM_DMA_COHERENT)
3876                seq_puts(m, " dma-coherent");
3877
3878        if (is_vmalloc_addr(v->pages))
3879                seq_puts(m, " vpages");
3880
3881        show_numa_info(m, v);
3882        seq_putc(m, '\n');
3883
3884        /*
3885         * As a final step, dump "unpurged" areas.
3886         */
3887        if (list_is_last(&va->list, &vmap_area_list))
3888                show_purge_info(m);
3889
3890        return 0;
3891}
3892
3893static const struct seq_operations vmalloc_op = {
3894        .start = s_start,
3895        .next = s_next,
3896        .stop = s_stop,
3897        .show = s_show,
3898};
3899
3900static int __init proc_vmalloc_init(void)
3901{
3902        if (IS_ENABLED(CONFIG_NUMA))
3903                proc_create_seq_private("vmallocinfo", 0400, NULL,
3904                                &vmalloc_op,
3905                                nr_node_ids * sizeof(unsigned int), NULL);
3906        else
3907                proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
3908        return 0;
3909}
3910module_init(proc_vmalloc_init);
3911
3912#endif
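
/*
 * Illustrative note, not part of vmalloc.c: a line emitted by s_show() above
 * into /proc/vmallocinfo looks roughly like the (made-up) sample below.
 * Addresses are printed with %pK, so unprivileged readers may see them
 * censored depending on kptr_restrict.
 *
 *   0xffffb38340000000-0xffffb38340005000   20480 alloc_large_system_hash+0x171/0x239 pages=4 vmalloc N0=4
 *
 * That is: start-end of the area, its size in bytes (here including a guard
 * page, hence pages=4 for a 5-page range), the caller, the page count,
 * optional "phys=", the flag words ("ioremap", "vmalloc", "vmap", "user",
 * "dma-coherent", "vpages") and the per-node page counts printed by
 * show_numa_info().
 */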
3913