linux/mm/vmalloc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
        ioremap_max_page_shift = PAGE_SHIFT;
        return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif  /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
        vmap_allow_huge = false;
        return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif  /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
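
/*
 * Note: both knobs above are wired up as kernel boot parameters. For
 * example, booting with "nohugevmalloc" on the command line forces
 * vmalloc() to use only PAGE_SIZE mappings, and "nohugeiomap" does the
 * same for ioremap():
 *
 *	linux ... nohugeiomap nohugevmalloc
 */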

bool is_vmalloc_addr(const void *x)
{
        unsigned long addr = (unsigned long)x;

        return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);
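
/*
 * Example (illustrative, not part of this file): is_vmalloc_addr() is
 * commonly used to pick the right "free" routine for a buffer that may
 * have come from either kmalloc() or vmalloc(), as kvfree() does:
 *
 *	if (is_vmalloc_addr(buf))
 *		vfree(buf);
 *	else
 *		kfree(buf);
 */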

struct vfree_deferred {
        struct llist_head list;
        struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
        struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
        struct llist_node *t, *llnode;

        llist_for_each_safe(llnode, t, llist_del_all(&p->list))
                __vunmap((void *)llnode, 1);
}
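
/*
 * A minimal sketch of the producer side of this mechanism (it lives in
 * vfree_atomic()/__vfree_deferred() further down in this file): the
 * address being freed is pushed onto the per-CPU lock-less list and the
 * worker is scheduled, which is what lets vfree() be called from atomic
 * context:
 *
 *	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
 *
 *	if (llist_add((struct llist_node *)addr, &p->list))
 *		schedule_work(&p->wq);
 */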

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
                        unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
        pte_t *pte;
        u64 pfn;
        unsigned long size = PAGE_SIZE;

        pfn = phys_addr >> PAGE_SHIFT;
        pte = pte_alloc_kernel_track(pmd, addr, mask);
        if (!pte)
                return -ENOMEM;
        do {
                BUG_ON(!pte_none(*pte));

#ifdef CONFIG_HUGETLB_PAGE
                size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
                if (size != PAGE_SIZE) {
                        pte_t entry = pfn_pte(pfn, prot);

                        entry = pte_mkhuge(entry);
                        entry = arch_make_huge_pte(entry, ilog2(size), 0);
                        set_huge_pte_at(&init_mm, addr, pte, entry);
                        pfn += PFN_DOWN(size);
                        continue;
                }
#endif
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte += PFN_DOWN(size), addr += size, addr != end);
        *mask |= PGTBL_PTE_MODIFIED;
        return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
                        unsigned int max_page_shift)
{
        if (max_page_shift < PMD_SHIFT)
                return 0;

        if (!arch_vmap_pmd_supported(prot))
                return 0;

        if ((end - addr) != PMD_SIZE)
                return 0;

        if (!IS_ALIGNED(addr, PMD_SIZE))
                return 0;

        if (!IS_ALIGNED(phys_addr, PMD_SIZE))
                return 0;

        if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
                return 0;

        return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
                        unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);

                if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
                                        max_page_shift)) {
                        *mask |= PGTBL_PMD_MODIFIED;
                        continue;
                }

                if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
                        return -ENOMEM;
        } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
                        unsigned int max_page_shift)
{
        if (max_page_shift < PUD_SHIFT)
                return 0;

        if (!arch_vmap_pud_supported(prot))
                return 0;

        if ((end - addr) != PUD_SIZE)
                return 0;

        if (!IS_ALIGNED(addr, PUD_SIZE))
                return 0;

        if (!IS_ALIGNED(phys_addr, PUD_SIZE))
                return 0;

        if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
                return 0;

        return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
                        unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_alloc_track(&init_mm, p4d, addr, mask);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);

                if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
                                        max_page_shift)) {
                        *mask |= PGTBL_PUD_MODIFIED;
                        continue;
                }

                if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
                                        max_page_shift, mask))
                        return -ENOMEM;
        } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
                        unsigned int max_page_shift)
{
        if (max_page_shift < P4D_SHIFT)
                return 0;

        if (!arch_vmap_p4d_supported(prot))
                return 0;

        if ((end - addr) != P4D_SIZE)
                return 0;

        if (!IS_ALIGNED(addr, P4D_SIZE))
                return 0;

        if (!IS_ALIGNED(phys_addr, P4D_SIZE))
                return 0;

        if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
                return 0;

        return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
                        unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
        p4d_t *p4d;
        unsigned long next;

        p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
        if (!p4d)
                return -ENOMEM;
        do {
                next = p4d_addr_end(addr, end);

                if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
                                        max_page_shift)) {
                        *mask |= PGTBL_P4D_MODIFIED;
                        continue;
                }

                if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
                                        max_page_shift, mask))
                        return -ENOMEM;
        } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
                        phys_addr_t phys_addr, pgprot_t prot,
                        unsigned int max_page_shift)
{
        pgd_t *pgd;
        unsigned long start;
        unsigned long next;
        int err;
        pgtbl_mod_mask mask = 0;

        might_sleep();
        BUG_ON(addr >= end);

        start = addr;
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
                                        max_page_shift, &mask);
                if (err)
                        break;
        } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
                arch_sync_kernel_mappings(start, end);

        return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
                phys_addr_t phys_addr, pgprot_t prot)
{
        int err;

        err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
                                 ioremap_max_page_shift);
        flush_cache_vmap(addr, end);
        return err;
}
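
/*
 * A sketch of how a generic ioremap() implementation drives the function
 * above (mm/ioremap.c does roughly this; details vary per architecture):
 * reserve a chunk of vmalloc space with VM_IOREMAP, map the physical range
 * into it, and tear the area down again if the mapping fails.
 *
 *	area = get_vm_area_caller(size, VM_IOREMAP,
 *			__builtin_return_address(0));
 *	if (!area)
 *		return NULL;
 *	vaddr = (unsigned long)area->addr;
 *
 *	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
 *			__pgprot(prot))) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 *
 *	return (void __iomem *)(vaddr + offset);
 */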

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                             pgtbl_mod_mask *mask)
{
        pte_t *pte;

        pte = pte_offset_kernel(pmd, addr);
        do {
                pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
                WARN_ON(!pte_none(ptent) && !pte_present(ptent));
        } while (pte++, addr += PAGE_SIZE, addr != end);
        *mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                             pgtbl_mod_mask *mask)
{
        pmd_t *pmd;
        unsigned long next;
        int cleared;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);

                cleared = pmd_clear_huge(pmd);
                if (cleared || pmd_bad(*pmd))
                        *mask |= PGTBL_PMD_MODIFIED;

                if (cleared)
                        continue;
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                vunmap_pte_range(pmd, addr, next, mask);

                cond_resched();
        } while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                             pgtbl_mod_mask *mask)
{
        pud_t *pud;
        unsigned long next;
        int cleared;

        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);

                cleared = pud_clear_huge(pud);
                if (cleared || pud_bad(*pud))
                        *mask |= PGTBL_PUD_MODIFIED;

                if (cleared)
                        continue;
                if (pud_none_or_clear_bad(pud))
                        continue;
                vunmap_pmd_range(pud, addr, next, mask);
        } while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                             pgtbl_mod_mask *mask)
{
        p4d_t *p4d;
        unsigned long next;
        int cleared;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);

                cleared = p4d_clear_huge(p4d);
                if (cleared || p4d_bad(*p4d))
                        *mask |= PGTBL_P4D_MODIFIED;

                if (cleared)
                        continue;
                if (p4d_none_or_clear_bad(p4d))
                        continue;
                vunmap_pud_range(p4d, addr, next, mask);
        } while (p4d++, addr = next, addr != end);
}
/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void vunmap_range_noflush(unsigned long start, unsigned long end)
{
        unsigned long next;
        pgd_t *pgd;
        unsigned long addr = start;
        pgtbl_mod_mask mask = 0;

        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_bad(*pgd))
                        mask |= PGTBL_PGD_MODIFIED;
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                vunmap_p4d_range(pgd, addr, next, &mask);
        } while (pgd++, addr = next, addr != end);

        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
                arch_sync_kernel_mappings(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
        flush_cache_vunmap(addr, end);
        vunmap_range_noflush(addr, end);
        flush_tlb_kernel_range(addr, end);
}
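
/*
 * Example (illustrative): the public vmap()/vunmap() API, implemented
 * further down in this file, is the usual way these mapping and unmapping
 * primitives are exercised. A driver that wants a contiguous kernel view
 * of scattered pages might do:
 *
 *	void *va = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		... access the buffer through va ...
 *		vunmap(va);
 *	}
 */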

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
                pgtbl_mod_mask *mask)
{
        pte_t *pte;

        /*
         * nr is a running index into the array which helps higher level
         * callers keep track of where we're up to.
         */

        pte = pte_alloc_kernel_track(pmd, addr, mask);
        if (!pte)
                return -ENOMEM;
        do {
                struct page *page = pages[*nr];

                if (WARN_ON(!pte_none(*pte)))
                        return -EBUSY;
                if (WARN_ON(!page))
                        return -ENOMEM;
                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
                (*nr)++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        *mask |= PGTBL_PTE_MODIFIED;
        return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
                pgtbl_mod_mask *mask)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);
                if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
                        return -ENOMEM;
        } while (pmd++, addr = next, addr != end);
        return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
                pgtbl_mod_mask *mask)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_alloc_track(&init_mm, p4d, addr, mask);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);
                if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
                        return -ENOMEM;
        } while (pud++, addr = next, addr != end);
        return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
                pgtbl_mod_mask *mask)
{
        p4d_t *p4d;
        unsigned long next;

        p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
        if (!p4d)
                return -ENOMEM;
        do {
                next = p4d_addr_end(addr, end);
                if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
                        return -ENOMEM;
        } while (p4d++, addr = next, addr != end);
        return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages)
{
        unsigned long start = addr;
        pgd_t *pgd;
        unsigned long next;
        int err = 0;
        int nr = 0;
        pgtbl_mod_mask mask = 0;

        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_bad(*pgd))
                        mask |= PGTBL_PGD_MODIFIED;
                err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
                if (err)
                        return err;
        } while (pgd++, addr = next, addr != end);

        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
                arch_sync_kernel_mappings(start, end);

        return 0;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift)
{
        unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

        WARN_ON(page_shift < PAGE_SHIFT);

        if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
                        page_shift == PAGE_SHIFT)
                return vmap_small_pages_range_noflush(addr, end, prot, pages);

        for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
                int err;

                err = vmap_range_noflush(addr, addr + (1UL << page_shift),
                                        __pa(page_address(pages[i])), prot,
                                        page_shift);
                if (err)
                        return err;

                addr += 1UL << page_shift;
        }

        return 0;
}
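
/*
 * A worked example of the loop above: with 4K base pages (PAGE_SHIFT == 12)
 * and page_shift == PMD_SHIFT == 21 on x86-64, each iteration maps one 2MB
 * leaf, so the index advances by 1 << (21 - 12) = 512 array entries and the
 * address by 1UL << 21 = 2MB per step. Only pages[0], pages[512], ... are
 * looked at; the memory behind each of them must be physically contiguous
 * for the whole 2MB.
 */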

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift)
{
        int err;

        err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
        flush_cache_vmap(addr, end);
        return err;
}

int is_vmalloc_or_module_addr(const void *x)
{
        /*
         * ARM, x86-64 and sparc64 put modules in a special place,
         * and fall back on vmalloc() if that fails. Others
         * just put them in the vmalloc space.
         */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
        unsigned long addr = (unsigned long)x;
        if (addr >= MODULES_VADDR && addr < MODULES_END)
                return 1;
#endif
        return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
        unsigned long addr = (unsigned long) vmalloc_addr;
        struct page *page = NULL;
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        /*
         * XXX we might need to change this if we add VIRTUAL_BUG_ON for
         * architectures that do not vmalloc module space
         */
        VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

        if (pgd_none(*pgd))
                return NULL;
        if (WARN_ON_ONCE(pgd_leaf(*pgd)))
                return NULL; /* XXX: no allowance for huge pgd */
        if (WARN_ON_ONCE(pgd_bad(*pgd)))
                return NULL;

        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return NULL;
        if (p4d_leaf(*p4d))
                return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
        if (WARN_ON_ONCE(p4d_bad(*p4d)))
                return NULL;

        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return NULL;
        if (pud_leaf(*pud))
                return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
        if (WARN_ON_ONCE(pud_bad(*pud)))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_leaf(*pmd))
                return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        if (WARN_ON_ONCE(pmd_bad(*pmd)))
                return NULL;

        ptep = pte_offset_map(pmd, addr);
        pte = *ptep;
        if (pte_present(pte))
                page = pte_page(pte);
        pte_unmap(ptep);

        return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
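
/*
 * Example (illustrative): a common driver pattern is to translate each page
 * of a vmalloc'd buffer back to its struct page, e.g. to build a page array
 * for scatter-gather DMA setup:
 *
 *	for (i = 0; i < npages; i++)
 *		pages[i] = vmalloc_to_page(buf + i * PAGE_SIZE);
 */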

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
        return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating them from the generic slab each time, we reuse
 * objects from this dedicated cache to make things faster,
 * especially in the "no edge" splitting of a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used together with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and for merging when a vmap
 * object is released.
 *
 * Each vmap_area node stores the maximum available free block
 * size of its sub-tree, right or left. This makes it possible to
 * find the lowest match, i.e. the free area with the lowest start
 * address, for a given size.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from atomic context, and thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
        return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
        struct vmap_area *va;

        va = rb_entry_safe(node, struct vmap_area, rb_node);
        return va ? va->subtree_max_size : 0;
}

/*
 * Called when a node is removed from the tree and the tree is rebalanced.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
        return max3(va_size(va),
                get_subtree_max_size(va->rb_node.rb_left),
                get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
        struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
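
/*
 * Note: RB_DECLARE_CALLBACKS_MAX (see <linux/rbtree_augmented.h>) expands
 * to the rb_augment_callbacks structure plus the propagate/copy/rotate
 * helpers that keep subtree_max_size up to date across tree operations.
 * In particular, the generated free_vmap_area_rb_augment_cb_propagate()
 * is what augment_tree_propagate_from() below relies on.
 */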

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
        return atomic_long_read(&nr_vmalloc_pages);
}

static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
{
        struct vmap_area *va = NULL;
        struct rb_node *n = vmap_area_root.rb_node;

        while (n) {
                struct vmap_area *tmp;

                tmp = rb_entry(n, struct vmap_area, rb_node);
                if (tmp->va_end > addr) {
                        va = tmp;
                        if (tmp->va_start <= addr)
                                break;

                        n = n->rb_left;
                } else
                        n = n->rb_right;
        }

        return va;
}

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
        struct rb_node *n = vmap_area_root.rb_node;

        while (n) {
                struct vmap_area *va;

                va = rb_entry(n, struct vmap_area, rb_node);
                if (addr < va->va_start)
                        n = n->rb_left;
                else if (addr >= va->va_end)
                        n = n->rb_right;
                else
                        return va;
        }

        return NULL;
}

/*
 * This function returns the address of the parent node and the
 * left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further steps
 * regarding insertion of a conflicting, overlapping range have
 * to be declined and are actually considered a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
        struct rb_root *root, struct rb_node *from,
        struct rb_node **parent)
{
        struct vmap_area *tmp_va;
        struct rb_node **link;

        if (root) {
                link = &root->rb_node;
                if (unlikely(!*link)) {
                        *parent = NULL;
                        return link;
                }
        } else {
                link = &from;
        }

        /*
         * Go to the bottom of the tree. When we hit the last point
         * we end up with the parent rb_node and the correct direction,
         * named "link", to which the new va->rb_node will be attached.
         */
        do {
                tmp_va = rb_entry(*link, struct vmap_area, rb_node);

                /*
                 * During the traversal we also do some sanity checks.
                 * Trigger a WARN() if there are partial (left/right)
                 * or full overlaps.
                 */
                if (va->va_start < tmp_va->va_end &&
                                va->va_end <= tmp_va->va_start)
                        link = &(*link)->rb_left;
                else if (va->va_end > tmp_va->va_start &&
                                va->va_start >= tmp_va->va_end)
                        link = &(*link)->rb_right;
                else {
                        WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
                                va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

                        return NULL;
                }
        } while (*link);

        *parent = &tmp_va->rb_node;
        return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
        struct list_head *list;

        if (unlikely(!parent))
                /*
                 * The red-black tree where we try to find VA neighbors
                 * before merging or inserting is empty, i.e. it means
                 * there is no free vmap space. Normally it does not
                 * happen but we handle this case anyway.
                 */
                return NULL;

        list = &rb_entry(parent, struct vmap_area, rb_node)->list;
        return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
        struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
        /*
         * VA is still not in the list, but we can
         * identify its future previous list_head node.
         */
        if (likely(parent)) {
                head = &rb_entry(parent, struct vmap_area, rb_node)->list;
                if (&parent->rb_right != link)
                        head = head->prev;
        }

        /* Insert to the rb-tree */
        rb_link_node(&va->rb_node, parent, link);
        if (root == &free_vmap_area_root) {
                /*
                 * Some explanation here. Just perform a simple insertion
                 * into the tree. We do not set va->subtree_max_size to
                 * its current size before calling rb_insert_augmented().
                 * This is because we populate the tree from the bottom
                 * up to the parent levels only when the node _is_ in the
                 * tree.
                 *
                 * Therefore we set subtree_max_size to zero after insertion,
                 * to let __augment_tree_propagate_from() put everything in
                 * the correct order later on.
                 */
                rb_insert_augmented(&va->rb_node,
                        root, &free_vmap_area_rb_augment_cb);
                va->subtree_max_size = 0;
        } else {
                rb_insert_color(&va->rb_node, root);
        }

        /* Address-sort this list */
        list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
        if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
                return;

        if (root == &free_vmap_area_root)
                rb_erase_augmented(&va->rb_node,
                        root, &free_vmap_area_rb_augment_cb);
        else
                rb_erase(&va->rb_node, root);

        list_del(&va->list);
        RB_CLEAR_NODE(&va->rb_node);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(void)
{
        struct vmap_area *va;
        unsigned long computed_size;

        list_for_each_entry(va, &free_vmap_area_list, list) {
                computed_size = compute_subtree_max_size(va);
                if (computed_size != va->subtree_max_size)
                        pr_emerg("tree is corrupted: %lu, %lu\n",
                                va_size(va), va->subtree_max_size);
        }
}
#endif

/*
 * This function populates subtree_max_size from the bottom up to
 * the upper levels, starting from the given VA. The propagation
 * must be done when the VA size is modified by changing its
 * va_start/va_end, or when a new VA is inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example, if we modify the node 4, shrinking it to 2, then
 * no modification is required at all. If we shrink the node 2 to 1,
 * only its own subtree_max_size is updated and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the
 * parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
        /*
         * Populate the tree from the bottom towards the root until
         * the calculated maximum available size of a checked node
         * is equal to its current one.
         */
        free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
        augment_tree_propagate_check();
#endif
}
static void
insert_vmap_area(struct vmap_area *va,
        struct rb_root *root, struct list_head *head)
{
        struct rb_node **link;
        struct rb_node *parent;

        link = find_va_links(va, root, NULL, &parent);
        if (link)
                link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
        struct rb_node *from, struct rb_root *root,
        struct list_head *head)
{
        struct rb_node **link;
        struct rb_node *parent;

        if (from)
                link = find_va_links(va, NULL, from, &parent);
        else
                link = find_va_links(va, root, NULL, &parent);

        if (link) {
                link_va(va, root, parent, link, head);
                augment_tree_propagate_from(va);
        }
}

/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If no coalescing is done, a new free
 * area is inserted. If the VA has been merged, it is freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, followed by a WARN() report. Despite this being
 * buggy behaviour, the system can stay alive and keep going.
 */
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
        struct rb_root *root, struct list_head *head)
{
        struct vmap_area *sibling;
        struct list_head *next;
        struct rb_node **link;
        struct rb_node *parent;
        bool merged = false;

        /*
         * Find a place in the tree where VA potentially will be
         * inserted, unless it is merged with its sibling/siblings.
         */
        link = find_va_links(va, root, NULL, &parent);
        if (!link)
                return NULL;

        /*
         * Get next node of VA to check if merging can be done.
         */
        next = get_va_next_sibling(parent, link);
        if (unlikely(next == NULL))
                goto insert;

        /*
         * start            end
         * |                |
         * |<------VA------>|<-----Next----->|
         *                  |                |
         *                  start            end
         */
        if (next != head) {
                sibling = list_entry(next, struct vmap_area, list);
                if (sibling->va_start == va->va_end) {
                        sibling->va_start = va->va_start;

                        /* Free vmap_area object. */
                        kmem_cache_free(vmap_area_cachep, va);

                        /* Point to the new merged area. */
                        va = sibling;
                        merged = true;
                }
        }

        /*
         * start            end
         * |                |
         * |<-----Prev----->|<------VA------>|
         *                  |                |
         *                  start            end
         */
        if (next->prev != head) {
                sibling = list_entry(next->prev, struct vmap_area, list);
                if (sibling->va_end == va->va_start) {
                        /*
                         * If both neighbors are coalesced, it is important
                         * to unlink the "next" node first, followed by merging
                         * with "previous" one. Otherwise the tree might not be
                         * fully populated if a sibling's augmented value is
                         * "normalized" because of rotation operations.
                         */
                        if (merged)
                                unlink_va(va, root);

                        sibling->va_end = va->va_end;

                        /* Free vmap_area object. */
                        kmem_cache_free(vmap_area_cachep, va);

                        /* Point to the new merged area. */
                        va = sibling;
                        merged = true;
                }
        }

insert:
        if (!merged)
                link_va(va, root, parent, link, head);

        return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
        struct rb_root *root, struct list_head *head)
{
        va = merge_or_add_vmap_area(va, root, head);
        if (va)
                augment_tree_propagate_from(va);

        return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
        unsigned long align, unsigned long vstart)
{
        unsigned long nva_start_addr;

        if (va->va_start > vstart)
                nva_start_addr = ALIGN(va->va_start, align);
        else
                nva_start_addr = ALIGN(vstart, align);

        /* Can be overflowed due to big size or alignment. */
        if (nva_start_addr + size < nva_start_addr ||
                        nva_start_addr < vstart)
                return false;

        return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block (lowest start address) in the tree
 * that can satisfy the request given by the passed parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
        unsigned long align, unsigned long vstart)
{
        struct vmap_area *va;
        struct rb_node *node;

        /* Start from the root. */
        node = free_vmap_area_root.rb_node;

        while (node) {
                va = rb_entry(node, struct vmap_area, rb_node);

                if (get_subtree_max_size(node->rb_left) >= size &&
                                vstart < va->va_start) {
                        node = node->rb_left;
                } else {
                        if (is_within_this_va(va, size, align, vstart))
                                return va;

                        /*
                         * It does not make sense to go deeper into the right
                         * sub-tree if it does not have a free block that is
                         * equal to or bigger than the requested search size.
                         */
                        if (get_subtree_max_size(node->rb_right) >= size) {
                                node = node->rb_right;
                                continue;
                        }

                        /*
                         * OK. We roll back and find the first right sub-tree
                         * that will satisfy the search criteria. This can
                         * happen due to the "vstart" restriction or an
                         * alignment overhead that is bigger than PAGE_SIZE.
                         */
                        while ((node = rb_parent(node))) {
                                va = rb_entry(node, struct vmap_area, rb_node);
                                if (is_within_this_va(va, size, align, vstart))
                                        return va;

                                if (get_subtree_max_size(node->rb_right) >= size &&
                                                vstart <= va->va_start) {
                                        /*
                                         * Shift the vstart forward. Please note, we update it with
                                         * the parent's start address, adding "1", because we do not
                                         * want to enter the same sub-tree after it has already been
                                         * checked and no suitable free block was found there.
                                         */
                                        vstart = va->va_start + 1;
                                        node = node->rb_right;
                                        break;
                                }
                        }
                }
        }

        return NULL;
}

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
        unsigned long align, unsigned long vstart)
{
        struct vmap_area *va;

        list_for_each_entry(va, &free_vmap_area_list, list) {
                if (!is_within_this_va(va, size, align, vstart))
                        continue;

                return va;
        }

        return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size, unsigned long align)
{
        struct vmap_area *va_1, *va_2;
        unsigned long vstart;
        unsigned int rnd;

        get_random_bytes(&rnd, sizeof(rnd));
        vstart = VMALLOC_START + rnd;

        va_1 = find_vmap_lowest_match(size, align, vstart);
        va_2 = find_vmap_lowest_linear_match(size, align, vstart);

        if (va_1 != va_2)
                pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
                        va_1, va_2, vstart);
}
#endif

enum fit_type {
        NOTHING_FIT = 0,
        FL_FIT_TYPE = 1,        /* full fit */
        LE_FIT_TYPE = 2,        /* left edge fit */
        RE_FIT_TYPE = 3,        /* right edge fit */
        NE_FIT_TYPE = 4         /* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
        unsigned long nva_start_addr, unsigned long size)
{
        enum fit_type type;

        /* Check if it is within VA. */
        if (nva_start_addr < va->va_start ||
                        nva_start_addr + size > va->va_end)
                return NOTHING_FIT;

        /* Now classify. */
        if (va->va_start == nva_start_addr) {
                if (va->va_end == nva_start_addr + size)
                        type = FL_FIT_TYPE;
                else
                        type = LE_FIT_TYPE;
        } else if (va->va_end == nva_start_addr + size) {
                type = RE_FIT_TYPE;
        } else {
                type = NE_FIT_TYPE;
        }

        return type;
}
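
/*
 * A worked example of the classification above: given a free VA spanning
 * [0x1000, 0x9000), a request placed at nva_start_addr == 0x1000 with
 * size == 0x8000 is FL_FIT_TYPE (it consumes the whole block); at 0x1000
 * with size 0x2000 it is LE_FIT_TYPE; at 0x7000 with size 0x2000 it is
 * RE_FIT_TYPE; and at 0x3000 with size 0x2000 it is NE_FIT_TYPE, which
 * splits the block in two and therefore needs a second vmap_area object.
 */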

static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
        unsigned long nva_start_addr, unsigned long size,
        enum fit_type type)
{
        struct vmap_area *lva = NULL;

        if (type == FL_FIT_TYPE) {
                /*
                 * No need to split VA, it fully fits.
                 *
                 * |               |
                 * V      NVA      V
                 * |---------------|
                 */
                unlink_va(va, &free_vmap_area_root);
                kmem_cache_free(vmap_area_cachep, va);
        } else if (type == LE_FIT_TYPE) {
                /*
                 * Split left edge of fit VA.
                 *
                 * |       |
                 * V  NVA  V   R
                 * |-------|-------|
                 */
                va->va_start += size;
        } else if (type == RE_FIT_TYPE) {
                /*
                 * Split right edge of fit VA.
                 *
                 *         |       |
                 *     L   V  NVA  V
                 * |-------|-------|
                 */
                va->va_end = nva_start_addr;
        } else if (type == NE_FIT_TYPE) {
                /*
                 * Split no edge of fit VA.
                 *
                 *     |       |
                 *   L V  NVA  V R
                 * |---|-------|---|
                 */
                lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
                if (unlikely(!lva)) {
                        /*
                         * For the percpu allocator we do not do any
                         * pre-allocation and leave it as it is. The reason
                         * is that it most likely never ends up with
                         * NE_FIT_TYPE splitting. In the case of percpu
                         * allocations, offsets and sizes are aligned to a
                         * fixed align request, i.e. RE_FIT_TYPE and
                         * FL_FIT_TYPE are its main fitting cases.
                         *
                         * There are a few exceptions though; one example is
                         * the first allocation (early boot up) when we have
                         * "one" big free space that has to be split.
                         *
                         * We can also hit this path in case of regular
                         * "vmap" allocations, if "this" current CPU was not
                         * preloaded. See the comment in alloc_vmap_area()
                         * for why. If so, GFP_NOWAIT is used instead to get
                         * an extra object for split purposes. That is rare
                         * and most of the time does not occur.
                         *
                         * What happens if an allocation fails? Basically,
                         * an "overflow" path is triggered to purge lazily
                         * freed areas to free some memory, then the "retry"
                         * path is triggered to repeat one more time. See
                         * more details in the alloc_vmap_area() function.
                         */
                        lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
                        if (!lva)
                                return -1;
                }

                /*
                 * Build the remainder.
                 */
                lva->va_start = va->va_start;
                lva->va_end = nva_start_addr;

                /*
                 * Shrink this VA to the remaining size.
                 */
                va->va_start = nva_start_addr + size;
        } else {
                return -1;
        }

        if (type != FL_FIT_TYPE) {
                augment_tree_propagate_from(va);

                if (lva)        /* type == NE_FIT_TYPE */
                        insert_vmap_area_augment(lva, &va->rb_node,
                                &free_vmap_area_root, &free_vmap_area_list);
        }

        return 0;
}

/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise, "vend" is returned to indicate failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
        unsigned long vstart, unsigned long vend)
{
        unsigned long nva_start_addr;
        struct vmap_area *va;
        enum fit_type type;
        int ret;

        va = find_vmap_lowest_match(size, align, vstart);
        if (unlikely(!va))
                return vend;

        if (va->va_start > vstart)
                nva_start_addr = ALIGN(va->va_start, align);
        else
                nva_start_addr = ALIGN(vstart, align);

        /* Check the "vend" restriction. */
        if (nva_start_addr + size > vend)
                return vend;

        /* Classify what we have found. */
        type = classify_va_fit_type(va, nva_start_addr, size);
        if (WARN_ON_ONCE(type == NOTHING_FIT))
                return vend;

        /* Update the free vmap_area. */
        ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
        if (ret)
                return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
        find_vmap_lowest_match_check(size, align);
#endif

        return nva_start_addr;
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
        /*
         * Remove from the busy tree/list.
         */
        spin_lock(&vmap_area_lock);
        unlink_va(va, &vmap_area_root);
        spin_unlock(&vmap_area_lock);

        /*
         * Insert/Merge it back to the free tree/list.
         */
        spin_lock(&free_vmap_area_lock);
        merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
        spin_unlock(&free_vmap_area_lock);
}

static inline void
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
{
        struct vmap_area *va = NULL;

        /*
         * Preload this CPU with one extra vmap_area object. It is used
         * when the fit type of a free area is NE_FIT_TYPE. It guarantees
         * that a CPU that does an allocation is preloaded.
         *
         * We do it in a non-atomic context, which allows us to use more
         * permissive allocation masks and thus to be more stable under
         * low-memory conditions and high memory pressure.
         */
        if (!this_cpu_read(ne_fit_preload_node))
                va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

        spin_lock(lock);

        if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
                kmem_cache_free(vmap_area_cachep, va);
}
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
                                unsigned long align,
                                unsigned long vstart, unsigned long vend,
                                int node, gfp_t gfp_mask)
{
        struct vmap_area *va;
        unsigned long freed;
        unsigned long addr;
        int purged = 0;
        int ret;

        BUG_ON(!size);
        BUG_ON(offset_in_page(size));
        BUG_ON(!is_power_of_2(align));

        if (unlikely(!vmap_initialized))
                return ERR_PTR(-EBUSY);

        might_sleep();
        gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

        va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
        if (unlikely(!va))
                return ERR_PTR(-ENOMEM);

        /*
         * Only scan the relevant parts containing pointers to other objects
         * to avoid false negatives.
         */
        kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);

retry:
        preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
        addr = __alloc_vmap_area(size, align, vstart, vend);
        spin_unlock(&free_vmap_area_lock);

        /*
         * If an allocation fails, the "vend" address is
         * returned. Therefore trigger the overflow path.
         */
        if (unlikely(addr == vend))
                goto overflow;

        va->va_start = addr;
        va->va_end = addr + size;
        va->vm = NULL;

        spin_lock(&vmap_area_lock);
        insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
        spin_unlock(&vmap_area_lock);

        BUG_ON(!IS_ALIGNED(va->va_start, align));
        BUG_ON(va->va_start < vstart);
        BUG_ON(va->va_end > vend);

        ret = kasan_populate_vmalloc(addr, size);
        if (ret) {
                free_vmap_area(va);
                return ERR_PTR(ret);
        }

        return va;

overflow:
        if (!purged) {
                purge_vmap_area_lazy();
                purged = 1;
                goto retry;
        }

        freed = 0;
        blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);

        if (freed > 0) {
                purged = 0;
                goto retry;
        }

        if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
                pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
                        size);

        kmem_cache_free(vmap_area_cachep, va);
        return ERR_PTR(-EBUSY);
}
1599
1600int register_vmap_purge_notifier(struct notifier_block *nb)
1601{
1602        return blocking_notifier_chain_register(&vmap_notify_list, nb);
1603}
1604EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1605
1606int unregister_vmap_purge_notifier(struct notifier_block *nb)
1607{
1608        return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1609}
1610EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
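/*
 * A minimal, hypothetical sketch of a purge-notifier user: a driver that
 * caches vmalloc()'ed buffers can drop them when KVA runs short and
 * report how much it freed via the counter passed as @data (see the
 * blocking_notifier_call_chain() call in alloc_vmap_area()). The names
 * my_shrink_cache() and my_vmap_nb are illustrative only:
 *
 *	static int my_vmap_purge(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		unsigned long *freed = data;
 *
 *		*freed += my_shrink_cache();	// pages given back
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_vmap_nb = {
 *		.notifier_call	= my_vmap_purge,
 *	};
 *
 *	// e.g. from module init:
 *	register_vmap_purge_notifier(&my_vmap_nb);
 */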
1611
1612/*
1613 * lazy_max_pages is the maximum amount of virtual address space we gather up
1614 * before attempting to purge with a TLB flush.
1615 *
1616 * There is a tradeoff here: a larger number will cover more kernel page tables
1617 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs). However, it is likely
 * that in practice workloads are constrained in other ways, so that vmap
 * activity will not scale linearly with CPUs. Also, I want to be
1623 * conservative and not introduce a big latency on huge systems, so go with
1624 * a less aggressive log scale. It will still be an improvement over the old
1625 * code, and it will be simple to change the scale factor if we find that it
1626 * becomes a problem on bigger systems.
1627 */
1628static unsigned long lazy_max_pages(void)
1629{
1630        unsigned int log;
1631
1632        log = fls(num_online_cpus());
1633
1634        return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1635}
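
/*
 * Worked example: with 16 online CPUs and 4K pages, fls(16) == 5, so
 * lazy_max_pages() == 5 * (32MB / 4KB) == 40960 pages; i.e. up to 160MB
 * of lazily-freed KVA may accumulate before a purge is attempted.
 */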
1636
1637static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1638
1639/*
 * Serialize vmap purging.  There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make pcpu_get_vm_areas() more deterministic.
1643 */
1644static DEFINE_MUTEX(vmap_purge_lock);
1645
1646/* for per-CPU blocks */
1647static void purge_fragmented_blocks_allcpus(void);
1648
1649#ifdef CONFIG_X86_64
1650/*
 * Called before iounmap() if the caller wants the vm_area_struct
 * freed immediately.
1653 */
1654void set_iounmap_nonlazy(void)
1655{
1656        atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1657}
1658#endif /* CONFIG_X86_64 */
1659
1660/*
1661 * Purges all lazily-freed vmap areas.
1662 */
1663static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1664{
1665        unsigned long resched_threshold;
        struct list_head local_purge_list;
1667        struct vmap_area *va, *n_va;
1668
1669        lockdep_assert_held(&vmap_purge_lock);
1670
1671        spin_lock(&purge_vmap_area_lock);
1672        purge_vmap_area_root = RB_ROOT;
        list_replace_init(&purge_vmap_area_list, &local_purge_list);
1674        spin_unlock(&purge_vmap_area_lock);
1675
        if (unlikely(list_empty(&local_purge_list)))
1677                return false;
1678
        start = min(start,
                list_first_entry(&local_purge_list,
                        struct vmap_area, list)->va_start);
1682
        end = max(end,
                list_last_entry(&local_purge_list,
                        struct vmap_area, list)->va_end);
1686
1687        flush_tlb_kernel_range(start, end);
1688        resched_threshold = lazy_max_pages() << 1;
1689
1690        spin_lock(&free_vmap_area_lock);
        list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
1692                unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1693                unsigned long orig_start = va->va_start;
1694                unsigned long orig_end = va->va_end;
1695
1696                /*
1697                 * Finally insert or merge lazily-freed area. It is
1698                 * detached and there is no need to "unlink" it from
1699                 * anything.
1700                 */
1701                va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
1702                                &free_vmap_area_list);
1703
1704                if (!va)
1705                        continue;
1706
1707                if (is_vmalloc_or_module_addr((void *)orig_start))
1708                        kasan_release_vmalloc(orig_start, orig_end,
1709                                              va->va_start, va->va_end);
1710
1711                atomic_long_sub(nr, &vmap_lazy_nr);
1712
1713                if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1714                        cond_resched_lock(&free_vmap_area_lock);
1715        }
1716        spin_unlock(&free_vmap_area_lock);
1717        return true;
1718}
1719
1720/*
1721 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1722 * is already purging.
1723 */
1724static void try_purge_vmap_area_lazy(void)
1725{
1726        if (mutex_trylock(&vmap_purge_lock)) {
1727                __purge_vmap_area_lazy(ULONG_MAX, 0);
1728                mutex_unlock(&vmap_purge_lock);
1729        }
1730}
1731
1732/*
1733 * Kick off a purge of the outstanding lazy areas.
1734 */
1735static void purge_vmap_area_lazy(void)
1736{
1737        mutex_lock(&vmap_purge_lock);
1738        purge_fragmented_blocks_allcpus();
1739        __purge_vmap_area_lazy(ULONG_MAX, 0);
1740        mutex_unlock(&vmap_purge_lock);
1741}
1742
1743/*
 * Free a vmap area, with the caller ensuring that the area has been
 * unmapped and that flush_cache_vunmap() has been called for the
 * correct range beforehand.
1747 */
1748static void free_vmap_area_noflush(struct vmap_area *va)
1749{
1750        unsigned long nr_lazy;
1751
1752        spin_lock(&vmap_area_lock);
1753        unlink_va(va, &vmap_area_root);
1754        spin_unlock(&vmap_area_lock);
1755
1756        nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1757                                PAGE_SHIFT, &vmap_lazy_nr);
1758
1759        /*
         * Merge or place it into the purge tree/list.
1761         */
1762        spin_lock(&purge_vmap_area_lock);
1763        merge_or_add_vmap_area(va,
1764                &purge_vmap_area_root, &purge_vmap_area_list);
1765        spin_unlock(&purge_vmap_area_lock);
1766
1767        /* After this point, we may free va at any time */
1768        if (unlikely(nr_lazy > lazy_max_pages()))
1769                try_purge_vmap_area_lazy();
1770}
1771
1772/*
1773 * Free and unmap a vmap area
1774 */
1775static void free_unmap_vmap_area(struct vmap_area *va)
1776{
1777        flush_cache_vunmap(va->va_start, va->va_end);
1778        vunmap_range_noflush(va->va_start, va->va_end);
1779        if (debug_pagealloc_enabled_static())
1780                flush_tlb_kernel_range(va->va_start, va->va_end);
1781
1782        free_vmap_area_noflush(va);
1783}
1784
1785static struct vmap_area *find_vmap_area(unsigned long addr)
1786{
1787        struct vmap_area *va;
1788
1789        spin_lock(&vmap_area_lock);
1790        va = __find_vmap_area(addr);
1791        spin_unlock(&vmap_area_lock);
1792
1793        return va;
1794}
1795
1796/*** Per cpu kva allocator ***/
1797
1798/*
 * vmap space is limited, especially on 32-bit architectures. Ensure there
 * is room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE             (VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea).
1806 */
1807#if BITS_PER_LONG == 32
1808#define VMALLOC_SPACE           (128UL*1024*1024)
1809#else
1810#define VMALLOC_SPACE           (128UL*1024*1024*1024)
1811#endif
1812
1813#define VMALLOC_PAGES           (VMALLOC_SPACE / PAGE_SIZE)
1814#define VMAP_MAX_ALLOC          BITS_PER_LONG   /* 256K with 4K pages */
1815#define VMAP_BBMAP_BITS_MAX     1024    /* 4MB with 4K pages */
1816#define VMAP_BBMAP_BITS_MIN     (VMAP_MAX_ALLOC*2)
1817#define VMAP_MIN(x, y)          ((x) < (y) ? (x) : (y)) /* can't use min() */
1818#define VMAP_MAX(x, y)          ((x) > (y) ? (x) : (y)) /* can't use max() */
1819#define VMAP_BBMAP_BITS         \
1820                VMAP_MIN(VMAP_BBMAP_BITS_MAX,   \
1821                VMAP_MAX(VMAP_BBMAP_BITS_MIN,   \
1822                        VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1823
1824#define VMAP_BLOCK_SIZE         (VMAP_BBMAP_BITS * PAGE_SIZE)
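
/*
 * Worked example: on a 64-bit kernel with 4K pages and NR_CPUS == 64,
 * VMALLOC_PAGES == 128G / 4K == 32M pages, so VMALLOC_PAGES / 64 / 16
 * == 32768, which the VMAP_MIN() above clamps to VMAP_BBMAP_BITS_MAX
 * == 1024. Hence VMAP_BBMAP_BITS == 1024 and VMAP_BLOCK_SIZE == 4MB.
 */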
1825
1826struct vmap_block_queue {
1827        spinlock_t lock;
1828        struct list_head free;
1829};
1830
1831struct vmap_block {
1832        spinlock_t lock;
1833        struct vmap_area *va;
1834        unsigned long free, dirty;
        unsigned long dirty_min, dirty_max; /* dirty range */
1836        struct list_head free_list;
1837        struct rcu_head rcu_head;
1838        struct list_head purge;
1839};
1840
1841/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1842static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1843
1844/*
1845 * XArray of vmap blocks, indexed by address, to quickly find a vmap block
1846 * in the free path. Could get rid of this if we change the API to return a
1847 * "cookie" from alloc, to be passed to free. But no big deal yet.
1848 */
1849static DEFINE_XARRAY(vmap_blocks);
1850
1851/*
1852 * We should probably have a fallback mechanism to allocate virtual memory
1853 * out of partially filled vmap blocks. However vmap block sizing should be
1854 * fairly reasonable according to the vmalloc size, so it shouldn't be a
1855 * big problem.
1856 */
1857
1858static unsigned long addr_to_vb_idx(unsigned long addr)
1859{
1860        addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1861        addr /= VMAP_BLOCK_SIZE;
1862        return addr;
1863}
1864
1865static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1866{
1867        unsigned long addr;
1868
1869        addr = va_start + (pages_off << PAGE_SHIFT);
1870        BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1871        return (void *)addr;
1872}
1873
1874/**
 * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in
 *                  it. The number of pages, of course, can't exceed
 *                  VMAP_BBMAP_BITS.
 * @order:    allocation order; 2^order pages are occupied in the new block
1878 * @gfp_mask: flags for the page level allocator
1879 *
1880 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1881 */
1882static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1883{
1884        struct vmap_block_queue *vbq;
1885        struct vmap_block *vb;
1886        struct vmap_area *va;
1887        unsigned long vb_idx;
1888        int node, err;
1889        void *vaddr;
1890
1891        node = numa_node_id();
1892
1893        vb = kmalloc_node(sizeof(struct vmap_block),
1894                        gfp_mask & GFP_RECLAIM_MASK, node);
1895        if (unlikely(!vb))
1896                return ERR_PTR(-ENOMEM);
1897
1898        va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1899                                        VMALLOC_START, VMALLOC_END,
1900                                        node, gfp_mask);
1901        if (IS_ERR(va)) {
1902                kfree(vb);
1903                return ERR_CAST(va);
1904        }
1905
1906        vaddr = vmap_block_vaddr(va->va_start, 0);
1907        spin_lock_init(&vb->lock);
1908        vb->va = va;
1909        /* At least something should be left free */
1910        BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1911        vb->free = VMAP_BBMAP_BITS - (1UL << order);
1912        vb->dirty = 0;
1913        vb->dirty_min = VMAP_BBMAP_BITS;
1914        vb->dirty_max = 0;
1915        INIT_LIST_HEAD(&vb->free_list);
1916
1917        vb_idx = addr_to_vb_idx(va->va_start);
1918        err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
1919        if (err) {
1920                kfree(vb);
1921                free_vmap_area(va);
1922                return ERR_PTR(err);
1923        }
1924
1925        vbq = &get_cpu_var(vmap_block_queue);
1926        spin_lock(&vbq->lock);
1927        list_add_tail_rcu(&vb->free_list, &vbq->free);
1928        spin_unlock(&vbq->lock);
1929        put_cpu_var(vmap_block_queue);
1930
1931        return vaddr;
1932}
1933
1934static void free_vmap_block(struct vmap_block *vb)
1935{
1936        struct vmap_block *tmp;
1937
1938        tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
1939        BUG_ON(tmp != vb);
1940
1941        free_vmap_area_noflush(vb->va);
1942        kfree_rcu(vb, rcu_head);
1943}
1944
1945static void purge_fragmented_blocks(int cpu)
1946{
1947        LIST_HEAD(purge);
1948        struct vmap_block *vb;
1949        struct vmap_block *n_vb;
1950        struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1951
1952        rcu_read_lock();
1953        list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1954
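                /*
                 * Cheap unlocked pre-check; the same condition is
                 * re-evaluated under vb->lock below before the block
                 * is actually claimed for purging.
                 */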
1955                if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1956                        continue;
1957
1958                spin_lock(&vb->lock);
1959                if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1960                        vb->free = 0; /* prevent further allocs after releasing lock */
1961                        vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1962                        vb->dirty_min = 0;
1963                        vb->dirty_max = VMAP_BBMAP_BITS;
1964                        spin_lock(&vbq->lock);
1965                        list_del_rcu(&vb->free_list);
1966                        spin_unlock(&vbq->lock);
1967                        spin_unlock(&vb->lock);
1968                        list_add_tail(&vb->purge, &purge);
1969                } else
1970                        spin_unlock(&vb->lock);
1971        }
1972        rcu_read_unlock();
1973
1974        list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1975                list_del(&vb->purge);
1976                free_vmap_block(vb);
1977        }
1978}
1979
1980static void purge_fragmented_blocks_allcpus(void)
1981{
1982        int cpu;
1983
1984        for_each_possible_cpu(cpu)
1985                purge_fragmented_blocks(cpu);
1986}
1987
1988static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1989{
1990        struct vmap_block_queue *vbq;
1991        struct vmap_block *vb;
1992        void *vaddr = NULL;
1993        unsigned int order;
1994
1995        BUG_ON(offset_in_page(size));
1996        BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1997        if (WARN_ON(size == 0)) {
1998                /*
                 * Allocating 0 bytes isn't what the caller wants, since
                 * get_order(0) returns a nonsensical result. Just warn
                 * and bail out early.
2002                 */
2003                return NULL;
2004        }
2005        order = get_order(size);
2006
2007        rcu_read_lock();
2008        vbq = &get_cpu_var(vmap_block_queue);
2009        list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2010                unsigned long pages_off;
2011
2012                spin_lock(&vb->lock);
2013                if (vb->free < (1UL << order)) {
2014                        spin_unlock(&vb->lock);
2015                        continue;
2016                }
2017
2018                pages_off = VMAP_BBMAP_BITS - vb->free;
2019                vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2020                vb->free -= 1UL << order;
2021                if (vb->free == 0) {
2022                        spin_lock(&vbq->lock);
2023                        list_del_rcu(&vb->free_list);
2024                        spin_unlock(&vbq->lock);
2025                }
2026
2027                spin_unlock(&vb->lock);
2028                break;
2029        }
2030
2031        put_cpu_var(vmap_block_queue);
2032        rcu_read_unlock();
2033
2034        /* Allocate new block if nothing was found */
2035        if (!vaddr)
2036                vaddr = new_vmap_block(order, gfp_mask);
2037
2038        return vaddr;
2039}
2040
2041static void vb_free(unsigned long addr, unsigned long size)
2042{
2043        unsigned long offset;
2044        unsigned int order;
2045        struct vmap_block *vb;
2046
2047        BUG_ON(offset_in_page(size));
2048        BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2049
2050        flush_cache_vunmap(addr, addr + size);
2051
2052        order = get_order(size);
2053        offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2054        vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
2055
2056        vunmap_range_noflush(addr, addr + size);
2057
2058        if (debug_pagealloc_enabled_static())
2059                flush_tlb_kernel_range(addr, addr + size);
2060
2061        spin_lock(&vb->lock);
2062
2063        /* Expand dirty range */
2064        vb->dirty_min = min(vb->dirty_min, offset);
2065        vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2066
2067        vb->dirty += 1UL << order;
2068        if (vb->dirty == VMAP_BBMAP_BITS) {
2069                BUG_ON(vb->free);
2070                spin_unlock(&vb->lock);
2071                free_vmap_block(vb);
2072        } else
2073                spin_unlock(&vb->lock);
2074}
2075
2076static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2077{
2078        int cpu;
2079
2080        if (unlikely(!vmap_initialized))
2081                return;
2082
2083        might_sleep();
2084
2085        for_each_possible_cpu(cpu) {
2086                struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2087                struct vmap_block *vb;
2088
2089                rcu_read_lock();
2090                list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2091                        spin_lock(&vb->lock);
2092                        if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
2093                                unsigned long va_start = vb->va->va_start;
2094                                unsigned long s, e;
2095
2096                                s = va_start + (vb->dirty_min << PAGE_SHIFT);
2097                                e = va_start + (vb->dirty_max << PAGE_SHIFT);
2098
2099                                start = min(s, start);
2100                                end   = max(e, end);
2101
2102                                flush = 1;
2103                        }
2104                        spin_unlock(&vb->lock);
2105                }
2106                rcu_read_unlock();
2107        }
2108
2109        mutex_lock(&vmap_purge_lock);
2110        purge_fragmented_blocks_allcpus();
2111        if (!__purge_vmap_area_lazy(start, end) && flush)
2112                flush_tlb_kernel_range(start, end);
2113        mutex_unlock(&vmap_purge_lock);
2114}
2115
2116/**
2117 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2118 *
2119 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now may, in a former life, have been mapped into a kernel virtual
 * address by the vmap layer, so there might be some CPUs with TLB entries
 * still referencing that page (in addition to the regular 1:1 kernel mapping).
2124 *
2125 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2126 * be sure that none of the pages we have control over will have any aliases
2127 * from the vmap layer.
2128 */
2129void vm_unmap_aliases(void)
2130{
2131        unsigned long start = ULONG_MAX, end = 0;
2132        int flush = 0;
2133
2134        _vm_unmap_aliases(start, end, flush);
2135}
2136EXPORT_SYMBOL_GPL(vm_unmap_aliases);
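
/*
 * A minimal sketch of a typical use (hypothetical caller): code that is
 * about to change protections of pages it owns flushes lazy vmap
 * aliases first, so that no stale alias of those pages survives:
 *
 *	vm_unmap_aliases();
 *	set_memory_ro(addr, nr_pages);	// e.g. a set_memory_*() helper
 */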
2137
2138/**
2139 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2140 * @mem: the pointer returned by vm_map_ram
2141 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2142 */
2143void vm_unmap_ram(const void *mem, unsigned int count)
2144{
2145        unsigned long size = (unsigned long)count << PAGE_SHIFT;
2146        unsigned long addr = (unsigned long)mem;
2147        struct vmap_area *va;
2148
2149        might_sleep();
2150        BUG_ON(!addr);
2151        BUG_ON(addr < VMALLOC_START);
2152        BUG_ON(addr > VMALLOC_END);
2153        BUG_ON(!PAGE_ALIGNED(addr));
2154
2155        kasan_poison_vmalloc(mem, size);
2156
2157        if (likely(count <= VMAP_MAX_ALLOC)) {
2158                debug_check_no_locks_freed(mem, size);
2159                vb_free(addr, size);
2160                return;
2161        }
2162
2163        va = find_vmap_area(addr);
2164        BUG_ON(!va);
2165        debug_check_no_locks_freed((void *)va->va_start,
2166                                    (va->va_end - va->va_start));
2167        free_unmap_vmap_area(va);
2168}
2169EXPORT_SYMBOL(vm_unmap_ram);
2170
2171/**
2172 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2173 * @pages: an array of pointers to the pages to be mapped
2174 * @count: number of pages
2175 * @node: prefer to allocate data structures on this node
2176 *
 * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
 * faster than vmap().  But if you mix long-lived and short-lived objects
 * with vm_map_ram(), it can consume a lot of address space through
 * fragmentation (especially on a 32-bit machine), eventually leading to
 * allocation failures.  Please use this function only for short-lived objects.
2182 *
2183 * Returns: a pointer to the address that has been mapped, or %NULL on failure
2184 */
2185void *vm_map_ram(struct page **pages, unsigned int count, int node)
2186{
2187        unsigned long size = (unsigned long)count << PAGE_SHIFT;
2188        unsigned long addr;
2189        void *mem;
2190
2191        if (likely(count <= VMAP_MAX_ALLOC)) {
2192                mem = vb_alloc(size, GFP_KERNEL);
2193                if (IS_ERR(mem))
2194                        return NULL;
2195                addr = (unsigned long)mem;
2196        } else {
2197                struct vmap_area *va;
2198                va = alloc_vmap_area(size, PAGE_SIZE,
2199                                VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
2200                if (IS_ERR(va))
2201                        return NULL;
2202
2203                addr = va->va_start;
2204                mem = (void *)addr;
2205        }
2206
2207        kasan_unpoison_vmalloc(mem, size);
2208
2209        if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2210                                pages, PAGE_SHIFT) < 0) {
2211                vm_unmap_ram(mem, count);
2212                return NULL;
2213        }
2214
2215        return mem;
2216}
2217EXPORT_SYMBOL(vm_map_ram);
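
/*
 * A minimal usage sketch, assuming the caller already has a "pages"
 * array of nr_pages pinned pages (both names are illustrative):
 *
 *	void *va = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, nr_pages * PAGE_SIZE);
 *	vm_unmap_ram(va, nr_pages);	// count must match the map call
 */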
2218
2219static struct vm_struct *vmlist __initdata;
2220
2221static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2222{
2223#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2224        return vm->page_order;
2225#else
2226        return 0;
2227#endif
2228}
2229
2230static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2231{
2232#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2233        vm->page_order = order;
2234#else
2235        BUG_ON(order != 0);
2236#endif
2237}
2238
2239/**
2240 * vm_area_add_early - add vmap area early during boot
2241 * @vm: vm_struct to add
2242 *
 * This function is used to add a fixed kernel vm area to vmlist before
2244 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
2245 * should contain proper values and the other fields should be zero.
2246 *
2247 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2248 */
2249void __init vm_area_add_early(struct vm_struct *vm)
2250{
2251        struct vm_struct *tmp, **p;
2252
2253        BUG_ON(vmap_initialized);
2254        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2255                if (tmp->addr >= vm->addr) {
2256                        BUG_ON(tmp->addr < vm->addr + vm->size);
2257                        break;
2258                } else
2259                        BUG_ON(tmp->addr + tmp->size > vm->addr);
2260        }
2261        vm->next = *p;
2262        *p = vm;
2263}
2264
2265/**
2266 * vm_area_register_early - register vmap area early during boot
2267 * @vm: vm_struct to register
2268 * @align: requested alignment
2269 *
 * This function is used to register a kernel vm area before
2271 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
2272 * proper values on entry and other fields should be zero.  On return,
2273 * vm->addr contains the allocated address.
2274 *
2275 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2276 */
2277void __init vm_area_register_early(struct vm_struct *vm, size_t align)
2278{
2279        unsigned long addr = ALIGN(VMALLOC_START, align);
2280        struct vm_struct *cur, **p;
2281
2282        BUG_ON(vmap_initialized);
2283
2284        for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
2285                if ((unsigned long)cur->addr - addr >= vm->size)
2286                        break;
2287                addr = ALIGN((unsigned long)cur->addr + cur->size, align);
2288        }
2289
2290        BUG_ON(addr > VMALLOC_END - vm->size);
2291        vm->addr = (void *)addr;
2292        vm->next = *p;
2293        *p = vm;
2294        kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
2295}
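
/*
 * A minimal early-boot sketch (illustrative): arch code can reserve a
 * fixed chunk of vmalloc space before vmalloc_init() runs by filling in
 * the size and flags and letting vm_area_register_early() pick the
 * address:
 *
 *	static struct vm_struct early_vm;
 *
 *	early_vm.flags = VM_ALLOC;
 *	early_vm.size = SZ_2M;
 *	vm_area_register_early(&early_vm, PAGE_SIZE);
 *	// early_vm.addr now holds the reserved KVA
 */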
2296
2297static void vmap_init_free_space(void)
2298{
2299        unsigned long vmap_start = 1;
2300        const unsigned long vmap_end = ULONG_MAX;
2301        struct vmap_area *busy, *free;
2302
2303        /*
2304         *     B     F     B     B     B     F
2305         * -|-----|.....|-----|-----|-----|.....|-
2306         *  |           The KVA space           |
2307         *  |<--------------------------------->|
2308         */
2309        list_for_each_entry(busy, &vmap_area_list, list) {
2310                if (busy->va_start - vmap_start > 0) {
2311                        free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2312                        if (!WARN_ON_ONCE(!free)) {
2313                                free->va_start = vmap_start;
2314                                free->va_end = busy->va_start;
2315
2316                                insert_vmap_area_augment(free, NULL,
2317                                        &free_vmap_area_root,
2318                                                &free_vmap_area_list);
2319                        }
2320                }
2321
2322                vmap_start = busy->va_end;
2323        }
2324
2325        if (vmap_end - vmap_start > 0) {
2326                free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2327                if (!WARN_ON_ONCE(!free)) {
2328                        free->va_start = vmap_start;
2329                        free->va_end = vmap_end;
2330
2331                        insert_vmap_area_augment(free, NULL,
2332                                &free_vmap_area_root,
2333                                        &free_vmap_area_list);
2334                }
2335        }
2336}
2337
2338void __init vmalloc_init(void)
2339{
2340        struct vmap_area *va;
2341        struct vm_struct *tmp;
2342        int i;
2343
2344        /*
2345         * Create the cache for vmap_area objects.
2346         */
2347        vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
2348
2349        for_each_possible_cpu(i) {
2350                struct vmap_block_queue *vbq;
2351                struct vfree_deferred *p;
2352
2353                vbq = &per_cpu(vmap_block_queue, i);
2354                spin_lock_init(&vbq->lock);
2355                INIT_LIST_HEAD(&vbq->free);
2356                p = &per_cpu(vfree_deferred, i);
2357                init_llist_head(&p->list);
2358                INIT_WORK(&p->wq, free_work);
2359        }
2360
2361        /* Import existing vmlist entries. */
2362        for (tmp = vmlist; tmp; tmp = tmp->next) {
2363                va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2364                if (WARN_ON_ONCE(!va))
2365                        continue;
2366
2367                va->va_start = (unsigned long)tmp->addr;
2368                va->va_end = va->va_start + tmp->size;
2369                va->vm = tmp;
2370                insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
2371        }
2372
2373        /*
2374         * Now we can initialize a free vmap space.
2375         */
2376        vmap_init_free_space();
2377        vmap_initialized = true;
2378}
2379
2380static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2381        struct vmap_area *va, unsigned long flags, const void *caller)
2382{
2383        vm->flags = flags;
2384        vm->addr = (void *)va->va_start;
2385        vm->size = va->va_end - va->va_start;
2386        vm->caller = caller;
2387        va->vm = vm;
2388}
2389
2390static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2391                              unsigned long flags, const void *caller)
2392{
2393        spin_lock(&vmap_area_lock);
2394        setup_vmalloc_vm_locked(vm, va, flags, caller);
2395        spin_unlock(&vmap_area_lock);
2396}
2397
2398static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2399{
2400        /*
         * Before removing VM_UNINITIALIZED, we should make sure that
         * vm has proper values. This pairs with the smp_rmb() in
         * show_numa_info().
2404         */
2405        smp_wmb();
2406        vm->flags &= ~VM_UNINITIALIZED;
2407}
2408
2409static struct vm_struct *__get_vm_area_node(unsigned long size,
2410                unsigned long align, unsigned long shift, unsigned long flags,
2411                unsigned long start, unsigned long end, int node,
2412                gfp_t gfp_mask, const void *caller)
2413{
2414        struct vmap_area *va;
2415        struct vm_struct *area;
2416        unsigned long requested_size = size;
2417
2418        BUG_ON(in_interrupt());
2419        size = ALIGN(size, 1ul << shift);
2420        if (unlikely(!size))
2421                return NULL;
2422
2423        if (flags & VM_IOREMAP)
2424                align = 1ul << clamp_t(int, get_count_order_long(size),
2425                                       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2426
2427        area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2428        if (unlikely(!area))
2429                return NULL;
2430
2431        if (!(flags & VM_NO_GUARD))
2432                size += PAGE_SIZE;
2433
2434        va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2435        if (IS_ERR(va)) {
2436                kfree(area);
2437                return NULL;
2438        }
2439
2440        kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2441
2442        setup_vmalloc_vm(area, va, flags, caller);
2443
2444        return area;
2445}
2446
2447struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2448                                       unsigned long start, unsigned long end,
2449                                       const void *caller)
2450{
2451        return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
2452                                  NUMA_NO_NODE, GFP_KERNEL, caller);
2453}
2454
2455/**
2456 * get_vm_area - reserve a contiguous kernel virtual area
2457 * @size:        size of the area
 * @flags:       %VM_IOREMAP for I/O mappings or %VM_ALLOC
2459 *
 * Search for an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.
2463 *
2464 * Return: the area descriptor on success or %NULL on failure.
2465 */
2466struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2467{
2468        return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2469                                  VMALLOC_START, VMALLOC_END,
2470                                  NUMA_NO_NODE, GFP_KERNEL,
2471                                  __builtin_return_address(0));
2472}
2473
2474struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2475                                const void *caller)
2476{
2477        return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2478                                  VMALLOC_START, VMALLOC_END,
2479                                  NUMA_NO_NODE, GFP_KERNEL, caller);
2480}
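
/*
 * A minimal reservation sketch (illustrative): reserve kernel virtual
 * address space without backing it with pages, then release it again
 * with free_vm_area():
 *
 *	struct vm_struct *area = get_vm_area(SZ_1M, VM_IOREMAP);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	// ... map something into [area->addr, area->addr + area->size) ...
 *	free_vm_area(area);
 */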
2481
2482/**
 * find_vm_area - find a contiguous kernel virtual area
2484 * @addr:         base address
2485 *
2486 * Search for the kernel VM area starting at @addr, and return it.
2487 * It is up to the caller to do all required locking to keep the returned
2488 * pointer valid.
2489 *
2490 * Return: the area descriptor on success or %NULL on failure.
2491 */
2492struct vm_struct *find_vm_area(const void *addr)
2493{
2494        struct vmap_area *va;
2495
2496        va = find_vmap_area((unsigned long)addr);
2497        if (!va)
2498                return NULL;
2499
2500        return va->vm;
2501}
2502
2503/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
2505 * @addr:           base address
2506 *
2507 * Search for the kernel VM area starting at @addr, and remove it.
2508 * This function returns the found VM area, but using it is NOT safe
2509 * on SMP machines, except for its size or flags.
2510 *
2511 * Return: the area descriptor on success or %NULL on failure.
2512 */
2513struct vm_struct *remove_vm_area(const void *addr)
2514{
2515        struct vmap_area *va;
2516
2517        might_sleep();
2518
2519        spin_lock(&vmap_area_lock);
2520        va = __find_vmap_area((unsigned long)addr);
2521        if (va && va->vm) {
2522                struct vm_struct *vm = va->vm;
2523
2524                va->vm = NULL;
2525                spin_unlock(&vmap_area_lock);
2526
2527                kasan_free_shadow(vm);
2528                free_unmap_vmap_area(va);
2529
2530                return vm;
2531        }
2532
2533        spin_unlock(&vmap_area_lock);
2534        return NULL;
2535}
2536
2537static inline void set_area_direct_map(const struct vm_struct *area,
2538                                       int (*set_direct_map)(struct page *page))
2539{
2540        int i;
2541
2542        /* HUGE_VMALLOC passes small pages to set_direct_map */
2543        for (i = 0; i < area->nr_pages; i++)
2544                if (page_address(area->pages[i]))
2545                        set_direct_map(area->pages[i]);
2546}
2547
2548/* Handle removing and resetting vm mappings related to the vm_struct. */
2549static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2550{
2551        unsigned long start = ULONG_MAX, end = 0;
2552        unsigned int page_order = vm_area_page_order(area);
2553        int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2554        int flush_dmap = 0;
2555        int i;
2556
2557        remove_vm_area(area->addr);
2558
2559        /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2560        if (!flush_reset)
2561                return;
2562
2563        /*
2564         * If not deallocating pages, just do the flush of the VM area and
2565         * return.
2566         */
2567        if (!deallocate_pages) {
2568                vm_unmap_aliases();
2569                return;
2570        }
2571
2572        /*
2573         * If execution gets here, flush the vm mapping and reset the direct
2574         * map. Find the start and end range of the direct mappings to make sure
2575         * the vm_unmap_aliases() flush includes the direct map.
2576         */
2577        for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2578                unsigned long addr = (unsigned long)page_address(area->pages[i]);
2579                if (addr) {
2580                        unsigned long page_size;
2581
2582                        page_size = PAGE_SIZE << page_order;
2583                        start = min(addr, start);
2584                        end = max(addr + page_size, end);
2585                        flush_dmap = 1;
2586                }
2587        }
2588
2589        /*
2590         * Set direct map to something invalid so that it won't be cached if
2591         * there are any accesses after the TLB flush, then flush the TLB and
2592         * reset the direct map permissions to the default.
2593         */
2594        set_area_direct_map(area, set_direct_map_invalid_noflush);
2595        _vm_unmap_aliases(start, end, flush_dmap);
2596        set_area_direct_map(area, set_direct_map_default_noflush);
2597}
2598
2599static void __vunmap(const void *addr, int deallocate_pages)
2600{
2601        struct vm_struct *area;
2602
2603        if (!addr)
2604                return;
2605
2606        if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2607                        addr))
2608                return;
2609
2610        area = find_vm_area(addr);
2611        if (unlikely(!area)) {
2612                WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2613                                addr);
2614                return;
2615        }
2616
2617        debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2618        debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2619
2620        kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
2621
2622        vm_remove_mappings(area, deallocate_pages);
2623
2624        if (deallocate_pages) {
2625                unsigned int page_order = vm_area_page_order(area);
2626                int i;
2627
2628                for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2629                        struct page *page = area->pages[i];
2630
2631                        BUG_ON(!page);
2632                        __free_pages(page, page_order);
2633                        cond_resched();
2634                }
2635                atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2636
2637                kvfree(area->pages);
2638        }
2639
2640        kfree(area);
2641}
2642
2643static inline void __vfree_deferred(const void *addr)
2644{
2645        /*
2646         * Use raw_cpu_ptr() because this can be called from preemptible
2647         * context. Preemption is absolutely fine here, because the llist_add()
2648         * implementation is lockless, so it works even if we are adding to
2649         * another cpu's list. schedule_work() should be fine with this too.
2650         */
2651        struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2652
2653        if (llist_add((struct llist_node *)addr, &p->list))
2654                schedule_work(&p->wq);
2655}
2656
2657/**
2658 * vfree_atomic - release memory allocated by vmalloc()
2659 * @addr:         memory base address
2660 *
2661 * This one is just like vfree() but can be called in any atomic context
2662 * except NMIs.
2663 */
2664void vfree_atomic(const void *addr)
2665{
2666        BUG_ON(in_nmi());
2667
2668        kmemleak_free(addr);
2669
2670        if (!addr)
2671                return;
2672        __vfree_deferred(addr);
2673}
2674
2675static void __vfree(const void *addr)
2676{
2677        if (unlikely(in_interrupt()))
2678                __vfree_deferred(addr);
2679        else
2680                __vunmap(addr, 1);
2681}
2682
2683/**
2684 * vfree - Release memory allocated by vmalloc()
2685 * @addr:  Memory base address
2686 *
 * Free the virtually contiguous memory area starting at @addr, as obtained
2688 * from one of the vmalloc() family of APIs.  This will usually also free the
2689 * physical memory underlying the virtual allocation, but that memory is
2690 * reference counted, so it will not be freed until the last user goes away.
2691 *
2692 * If @addr is NULL, no operation is performed.
2693 *
2694 * Context:
2695 * May sleep if called *not* from interrupt context.
2696 * Must not be called in NMI context (strictly speaking, it could be
2697 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2698 * conventions for vfree() arch-dependent would be a really bad idea).
2699 */
2700void vfree(const void *addr)
2701{
2702        BUG_ON(in_nmi());
2703
2704        kmemleak_free(addr);
2705
2706        might_sleep_if(!in_interrupt());
2707
2708        if (!addr)
2709                return;
2710
2711        __vfree(addr);
2712}
2713EXPORT_SYMBOL(vfree);
2714
2715/**
2716 * vunmap - release virtual mapping obtained by vmap()
2717 * @addr:   memory base address
2718 *
2719 * Free the virtually contiguous memory area starting at @addr,
2720 * which was created from the page array passed to vmap().
2721 *
2722 * Must not be called in interrupt context.
2723 */
2724void vunmap(const void *addr)
2725{
2726        BUG_ON(in_interrupt());
2727        might_sleep();
2728        if (addr)
2729                __vunmap(addr, 0);
2730}
2731EXPORT_SYMBOL(vunmap);
2732
2733/**
2734 * vmap - map an array of pages into virtually contiguous space
2735 * @pages: array of page pointers
2736 * @count: number of pages to map
2737 * @flags: vm_area->flags
2738 * @prot: page protection for the mapping
2739 *
2740 * Maps @count pages from @pages into contiguous kernel virtual space.
2741 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
 * (which must be kmalloc or vmalloc memory) and one reference per page in it
2743 * are transferred from the caller to vmap(), and will be freed / dropped when
2744 * vfree() is called on the return value.
2745 *
2746 * Return: the address of the area or %NULL on failure
2747 */
2748void *vmap(struct page **pages, unsigned int count,
2749           unsigned long flags, pgprot_t prot)
2750{
2751        struct vm_struct *area;
2752        unsigned long addr;
2753        unsigned long size;             /* In bytes */
2754
2755        might_sleep();
2756
2757        /*
2758         * Your top guard is someone else's bottom guard. Not having a top
2759         * guard compromises someone else's mappings too.
2760         */
2761        if (WARN_ON_ONCE(flags & VM_NO_GUARD))
2762                flags &= ~VM_NO_GUARD;
2763
2764        if (count > totalram_pages())
2765                return NULL;
2766
2767        size = (unsigned long)count << PAGE_SHIFT;
2768        area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2769        if (!area)
2770                return NULL;
2771
2772        addr = (unsigned long)area->addr;
2773        if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2774                                pages, PAGE_SHIFT) < 0) {
2775                vunmap(area->addr);
2776                return NULL;
2777        }
2778
2779        if (flags & VM_MAP_PUT_PAGES) {
2780                area->pages = pages;
2781                area->nr_pages = count;
2782        }
2783        return area->addr;
2784}
2785EXPORT_SYMBOL(vmap);
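
/*
 * A minimal usage sketch (illustrative, error handling elided): map two
 * freshly allocated pages into one virtually contiguous kernel range:
 *
 *	struct page *pages[2];
 *	void *va;
 *
 *	pages[0] = alloc_page(GFP_KERNEL);
 *	pages[1] = alloc_page(GFP_KERNEL);
 *	va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *	if (va) {
 *		// use va[0 .. 2 * PAGE_SIZE) ...
 *		vunmap(va);
 *	}
 *	__free_page(pages[1]);
 *	__free_page(pages[0]);
 */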
2786
2787#ifdef CONFIG_VMAP_PFN
2788struct vmap_pfn_data {
2789        unsigned long   *pfns;
2790        pgprot_t        prot;
2791        unsigned int    idx;
2792};
2793
2794static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
2795{
2796        struct vmap_pfn_data *data = private;
2797
2798        if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
2799                return -EINVAL;
2800        *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
2801        return 0;
2802}
2803
2804/**
2805 * vmap_pfn - map an array of PFNs into virtually contiguous space
2806 * @pfns: array of PFNs
2807 * @count: number of pages to map
2808 * @prot: page protection for the mapping
2809 *
2810 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
2811 * the start address of the mapping.
2812 */
2813void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
2814{
2815        struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
2816        struct vm_struct *area;
2817
2818        area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
2819                        __builtin_return_address(0));
2820        if (!area)
2821                return NULL;
2822        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2823                        count * PAGE_SIZE, vmap_pfn_apply, &data)) {
2824                free_vm_area(area);
2825                return NULL;
2826        }
2827        return area->addr;
2828}
2829EXPORT_SYMBOL_GPL(vmap_pfn);
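
/*
 * A minimal usage sketch (illustrative): map two device PFNs that have
 * no struct page backing (e.g. from a PCI BAR, hence the pfn_valid()
 * check above) as write-combined; "base_pfn" is a placeholder:
 *
 *	unsigned long pfns[2] = { base_pfn, base_pfn + 1 };
 *	void *va = vmap_pfn(pfns, 2, pgprot_writecombine(PAGE_KERNEL));
 *
 *	if (!va)
 *		return -ENOMEM;
 *	// ... use the mapping, later release it with vunmap(va) ...
 */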
2830#endif /* CONFIG_VMAP_PFN */
2831
2832static inline unsigned int
2833vm_area_alloc_pages(gfp_t gfp, int nid,
2834                unsigned int order, unsigned int nr_pages, struct page **pages)
2835{
2836        unsigned int nr_allocated = 0;
2837        struct page *page;
2838        int i;
2839
2840        /*
         * For order-0 pages we make use of the bulk allocator. If
         * the page array ends up only partly populated (or not
         * populated at all) due to failures, fall back to the
         * single page allocator, which is more permissive.
2845         */
2846        if (!order) {
2847                while (nr_allocated < nr_pages) {
2848                        unsigned int nr, nr_pages_request;
2849
2850                        /*
                         * The maximum allowed request is hard-coded to 100
                         * pages per call, in order to prevent long
                         * preemption-off sections in the bulk allocator;
                         * the range is therefore [1:100].
2855                         */
2856                        nr_pages_request = min(100U, nr_pages - nr_allocated);
2857
                        /*
                         * Memory allocation should consider the mempolicy:
                         * we must not blindly use the nearest node when
                         * nid == NUMA_NO_NODE, otherwise memory may be
                         * allocated on only one node even though the
                         * mempolicy wants to interleave the allocations.
                         */
2863                        if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
2864                                nr = alloc_pages_bulk_array_mempolicy(gfp,
2865                                                        nr_pages_request,
2866                                                        pages + nr_allocated);
2867
2868                        else
2869                                nr = alloc_pages_bulk_array_node(gfp, nid,
2870                                                        nr_pages_request,
2871                                                        pages + nr_allocated);
2872
2873                        nr_allocated += nr;
2874                        cond_resched();
2875
2876                        /*
                         * If no pages, or only some of the requested
                         * pages, were obtained, fall back to the single
                         * page allocator.
2879                         */
2880                        if (nr != nr_pages_request)
2881                                break;
2882                }
2883        } else
2884                /*
                 * Compound pages are required for remap_vmalloc_page
                 * when high-order pages are used.
2887                 */
2888                gfp |= __GFP_COMP;
2889
2890        /* High-order pages or fallback path if "bulk" fails. */
2891
2892        while (nr_allocated < nr_pages) {
2893                if (fatal_signal_pending(current))
2894                        break;
2895
2896                if (nid == NUMA_NO_NODE)
2897                        page = alloc_pages(gfp, order);
2898                else
2899                        page = alloc_pages_node(nid, gfp, order);
2900                if (unlikely(!page))
2901                        break;
2902
2903                /*
2904                 * Careful, we allocate and map page-order pages, but
2905                 * tracking is done per PAGE_SIZE page so as to keep the
2906                 * vm_struct APIs independent of the physical/mapped size.
2907                 */
2908                for (i = 0; i < (1U << order); i++)
2909                        pages[nr_allocated + i] = page + i;
2910
2911                cond_resched();
2912                nr_allocated += 1U << order;
2913        }
2914
2915        return nr_allocated;
2916}
2917
2918static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2919                                 pgprot_t prot, unsigned int page_shift,
2920                                 int node)
2921{
2922        const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2923        const gfp_t orig_gfp_mask = gfp_mask;
2924        unsigned long addr = (unsigned long)area->addr;
2925        unsigned long size = get_vm_area_size(area);
2926        unsigned long array_size;
2927        unsigned int nr_small_pages = size >> PAGE_SHIFT;
2928        unsigned int page_order;
2929
2930        array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
2931        gfp_mask |= __GFP_NOWARN;
2932        if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
2933                gfp_mask |= __GFP_HIGHMEM;
2934
2935        /* Please note that the recursion is strictly bounded. */
2936        if (array_size > PAGE_SIZE) {
2937                area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
2938                                        area->caller);
2939        } else {
2940                area->pages = kmalloc_node(array_size, nested_gfp, node);
2941        }
2942
2943        if (!area->pages) {
2944                warn_alloc(orig_gfp_mask, NULL,
2945                        "vmalloc error: size %lu, failed to allocated page array size %lu",
2946                        nr_small_pages * PAGE_SIZE, array_size);
2947                free_vm_area(area);
2948                return NULL;
2949        }
2950
2951        set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
2952        page_order = vm_area_page_order(area);
2953
2954        area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
2955                page_order, nr_small_pages, area->pages);
2956
2957        atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2958
2959        /*
         * If not enough pages were obtained to satisfy the
         * allocation request, free whatever was obtained via __vfree().
2962         */
2963        if (area->nr_pages != nr_small_pages) {
2964                warn_alloc(orig_gfp_mask, NULL,
2965                        "vmalloc error: size %lu, page order %u, failed to allocate pages",
2966                        area->nr_pages * PAGE_SIZE, page_order);
2967                goto fail;
2968        }
2969
2970        if (vmap_pages_range(addr, addr + size, prot, area->pages,
2971                        page_shift) < 0) {
2972                warn_alloc(orig_gfp_mask, NULL,
2973                        "vmalloc error: size %lu, failed to map pages",
2974                        area->nr_pages * PAGE_SIZE);
2975                goto fail;
2976        }
2977
2978        return area->addr;
2979
2980fail:
2981        __vfree(area->addr);
2982        return NULL;
2983}
2984
2985/**
2986 * __vmalloc_node_range - allocate virtually contiguous memory
2987 * @size:                 allocation size
2988 * @align:                desired alignment
2989 * @start:                vm area range start
2990 * @end:                  vm area range end
2991 * @gfp_mask:             flags for the page level allocator
2992 * @prot:                 protection mask for the allocated pages
2993 * @vm_flags:             additional vm area flags (e.g. %VM_NO_GUARD)
2994 * @node:                 node to use for allocation or NUMA_NO_NODE
2995 * @caller:               caller's return address
2996 *
2997 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Please note that the full set of gfp
 * flags is not supported. GFP_KERNEL is the preferred allocation mode,
 * but GFP_NOFS and GFP_NOIO are supported as well. Zone modifiers are not
 * supported. Among the reclaim modifiers, __GFP_DIRECT_RECLAIM is required
 * (i.e. GFP_NOWAIT is not supported) and only __GFP_NOFAIL is supported
 * (i.e. __GFP_NORETRY and __GFP_RETRY_MAYFAIL are not supported).
3004 * __GFP_NOWARN can be used to suppress error messages about failures.
3005 *
3006 * Map them into contiguous kernel virtual space, using a pagetable
3007 * protection of @prot.
3008 *
3009 * Return: the address of the area or %NULL on failure
3010 */
3011void *__vmalloc_node_range(unsigned long size, unsigned long align,
3012                        unsigned long start, unsigned long end, gfp_t gfp_mask,
3013                        pgprot_t prot, unsigned long vm_flags, int node,
3014                        const void *caller)
3015{
3016        struct vm_struct *area;
3017        void *addr;
3018        unsigned long real_size = size;
3019        unsigned long real_align = align;
3020        unsigned int shift = PAGE_SHIFT;
3021
3022        if (WARN_ON_ONCE(!size))
3023                return NULL;
3024
3025        if ((size >> PAGE_SHIFT) > totalram_pages()) {
3026                warn_alloc(gfp_mask, NULL,
3027                        "vmalloc error: size %lu, exceeds total pages",
3028                        real_size);
3029                return NULL;
3030        }
3031
3032        if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
3033                unsigned long size_per_node;
3034
3035                /*
                 * Try huge pages. Only try for PAGE_KERNEL allocations;
                 * others, like modules, don't yet expect huge pages in
                 * their allocations because apply_to_page_range() does
                 * not support them.
3040                 */
3041
3042                size_per_node = size;
3043                if (node == NUMA_NO_NODE)
3044                        size_per_node /= num_online_nodes();
3045                if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
3046                        shift = PMD_SHIFT;
3047                else
3048                        shift = arch_vmap_pte_supported_shift(size_per_node);
3049
3050                align = max(real_align, 1UL << shift);
3051                size = ALIGN(real_size, 1UL << shift);
3052        }
3053
3054again:
3055        area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
3056                                  VM_UNINITIALIZED | vm_flags, start, end, node,
3057                                  gfp_mask, caller);
3058        if (!area) {
3059                warn_alloc(gfp_mask, NULL,
3060                        "vmalloc error: size %lu, vm_struct allocation failed",
3061                        real_size);
3062                goto fail;
3063        }
3064
3065        addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3066        if (!addr)
3067                goto fail;
3068
3069        /*
         * The vm_struct allocated in this function has the VM_UNINITIALIZED
         * flag set, meaning it is not yet fully initialized.
         * Now that it is fully initialized, remove the flag here.
3073         */
3074        clear_vm_uninitialized_flag(area);
3075
3076        size = PAGE_ALIGN(size);
3077        kmemleak_vmalloc(area, size, gfp_mask);
3078
3079        return addr;
3080
3081fail:
3082        if (shift > PAGE_SHIFT) {
3083                shift = PAGE_SHIFT;
3084                align = real_align;
3085                size = real_size;
3086                goto again;
3087        }
3088
3089        return NULL;
3090}
3091
3092/**
3093 * __vmalloc_node - allocate virtually contiguous memory
3094 * @size:           allocation size
3095 * @align:          desired alignment
3096 * @gfp_mask:       flags for the page level allocator
3097 * @node:           node to use for allocation or NUMA_NO_NODE
3098 * @caller:         caller's return address
3099 *
3100 * Allocate enough pages to cover @size from the page level allocator with
3101 * @gfp_mask flags.  Map them into contiguous kernel virtual space.
3102 *
3103 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3104 * and __GFP_NOFAIL are not supported.
3105 *
3106 * Any use of gfp flags outside of GFP_KERNEL should be discussed
3107 * with the mm people first.
3108 *
3109 * Return: pointer to the allocated memory or %NULL on error
3110 */
3111void *__vmalloc_node(unsigned long size, unsigned long align,
3112                            gfp_t gfp_mask, int node, const void *caller)
3113{
3114        return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
3115                                gfp_mask, PAGE_KERNEL, 0, node, caller);
3116}
3117/*
3118 * This export exists only for performance analysis and stress testing
3119 * of vmalloc. It is required by the vmalloc test module; do not use it
3120 * for anything else.
3121 */
3122#ifdef CONFIG_TEST_VMALLOC_MODULE
3123EXPORT_SYMBOL_GPL(__vmalloc_node);
3124#endif
3125
3126void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3127{
3128        return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
3129                                __builtin_return_address(0));
3130}
3131EXPORT_SYMBOL(__vmalloc);
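
/*
 * Illustrative sketch (not part of mm/vmalloc.c): __vmalloc() is the
 * entry point for callers that need a non-default gfp mask within the
 * constraints documented above, e.g. zeroed memory with the allocation
 * failure warning suppressed.
 */
static void *example_quiet_zeroed_alloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
}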
3132
3133/**
3134 * vmalloc - allocate virtually contiguous memory
3135 * @size:    allocation size
3136 *
3137 * Allocate enough pages to cover @size from the page level
3138 * allocator and map them into contiguous kernel virtual space.
3139 *
3140 * For tight control over page level allocator and protection flags
3141 * use __vmalloc() instead.
3142 *
3143 * Return: pointer to the allocated memory or %NULL on error
3144 */
3145void *vmalloc(unsigned long size)
3146{
3147        return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3148                                __builtin_return_address(0));
3149}
3150EXPORT_SYMBOL(vmalloc);
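
/*
 * Illustrative usage (not part of mm/vmalloc.c): the classic pattern
 * for a large buffer that must be virtually but not physically
 * contiguous. Memory from vmalloc() is always released with vfree().
 */
static int example_use_vmalloc(void)
{
	char *buf = vmalloc(SZ_1M);	/* SZ_1M from <linux/sizes.h> */

	if (!buf)
		return -ENOMEM;
	memset(buf, 0, SZ_1M);
	/* ... use buf ... */
	vfree(buf);
	return 0;
}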
3151
3152/**
3153 * vmalloc_no_huge - allocate virtually contiguous memory using small pages
3154 * @size:    allocation size
3155 *
3156 * Allocate enough non-huge pages to cover @size from the page level
3157 * allocator and map them into contiguous kernel virtual space.
3158 *
3159 * Return: pointer to the allocated memory or %NULL on error
3160 */
3161void *vmalloc_no_huge(unsigned long size)
3162{
3163        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3164                                    GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
3165                                    NUMA_NO_NODE, __builtin_return_address(0));
3166}
3167EXPORT_SYMBOL(vmalloc_no_huge);
3168
3169/**
3170 * vzalloc - allocate virtually contiguous memory with zero fill
3171 * @size:    allocation size
3172 *
3173 * Allocate enough pages to cover @size from the page level
3174 * allocator and map them into contiguous kernel virtual space.
3175 * The memory allocated is set to zero.
3176 *
3177 * For tight control over page level allocator and protection flags
3178 * use __vmalloc() instead.
3179 *
3180 * Return: pointer to the allocated memory or %NULL on error
3181 */
3182void *vzalloc(unsigned long size)
3183{
3184        return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3185                                __builtin_return_address(0));
3186}
3187EXPORT_SYMBOL(vzalloc);
3188
3189/**
3190 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3191 * @size: allocation size
3192 *
3193 * The resulting memory area is zeroed so it can be mapped to userspace
3194 * without leaking data.
3195 *
3196 * Return: pointer to the allocated memory or %NULL on error
3197 */
3198void *vmalloc_user(unsigned long size)
3199{
3200        return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3201                                    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3202                                    VM_USERMAP, NUMA_NO_NODE,
3203                                    __builtin_return_address(0));
3204}
3205EXPORT_SYMBOL(vmalloc_user);
3206
3207/**
3208 * vmalloc_node - allocate memory on a specific node
3209 * @size:         allocation size
3210 * @node:         numa node
3211 *
3212 * Allocate enough pages to cover @size from the page level
3213 * allocator and map them into contiguous kernel virtual space.
3214 *
3215 * For tight control over page level allocator and protection flags
3216 * use __vmalloc() instead.
3217 *
3218 * Return: pointer to the allocated memory or %NULL on error
3219 */
3220void *vmalloc_node(unsigned long size, int node)
3221{
3222        return __vmalloc_node(size, 1, GFP_KERNEL, node,
3223                        __builtin_return_address(0));
3224}
3225EXPORT_SYMBOL(vmalloc_node);
3226
3227/**
3228 * vzalloc_node - allocate memory on a specific node with zero fill
3229 * @size:       allocation size
3230 * @node:       numa node
3231 *
3232 * Allocate enough pages to cover @size from the page level
3233 * allocator and map them into contiguous kernel virtual space.
3234 * The memory allocated is set to zero.
3235 *
3236 * Return: pointer to the allocated memory or %NULL on error
3237 */
3238void *vzalloc_node(unsigned long size, int node)
3239{
3240        return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
3241                                __builtin_return_address(0));
3242}
3243EXPORT_SYMBOL(vzalloc_node);
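
/*
 * Illustrative sketch (not part of mm/vmalloc.c): filling a table of
 * per-node buffers, keeping each buffer's pages local to its node. The
 * bufs[] array is assumed to hold at least nr_node_ids entries.
 */
static void example_alloc_on_each_node(void *bufs[], unsigned long size)
{
	int nid;

	for_each_online_node(nid)
		bufs[nid] = vzalloc_node(size, nid);
}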
3244
3245#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3246#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3247#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3248#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
3249#else
3250/*
3251 * 64b systems should always have either DMA or DMA32 zones. For the
3252 * others, GFP_DMA32 should do the right thing and use the normal zone.
3253 */
3254#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3255#endif
3256
3257/**
3258 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3259 * @size:       allocation size
3260 *
3261 * Allocate enough 32bit PA addressable pages to cover @size from the
3262 * page level allocator and map them into contiguous kernel virtual space.
3263 *
3264 * Return: pointer to the allocated memory or %NULL on error
3265 */
3266void *vmalloc_32(unsigned long size)
3267{
3268        return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3269                        __builtin_return_address(0));
3270}
3271EXPORT_SYMBOL(vmalloc_32);
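
/*
 * Illustrative sketch (not part of mm/vmalloc.c): vmalloc_32() only
 * guarantees that the backing pages are 32-bit physically addressable;
 * the buffer is still just virtually contiguous. Drivers use it when
 * individual pages are later handed to hardware that cannot address
 * beyond 4GB, looking them up with vmalloc_to_page().
 */
static struct page *example_first_low_page(void *buf32)
{
	/* buf32 is assumed to come from vmalloc_32() */
	return vmalloc_to_page(buf32);
}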
3272
3273/**
3274 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
3275 * @size:            allocation size
3276 *
3277 * The resulting memory area is 32bit addressable and zeroed so it can be
3278 * mapped to userspace without leaking data.
3279 *
3280 * Return: pointer to the allocated memory or %NULL on error
3281 */
3282void *vmalloc_32_user(unsigned long size)
3283{
3284        return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3285                                    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3286                                    VM_USERMAP, NUMA_NO_NODE,
3287                                    __builtin_return_address(0));
3288}
3289EXPORT_SYMBOL(vmalloc_32_user);
3290
3291/*
3292 * Small helper routine: copy contents from addr to buf.
3293 * If a page is not present, fill the gap with zeroes.
3294 */
3295
3296static int aligned_vread(char *buf, char *addr, unsigned long count)
3297{
3298        struct page *p;
3299        int copied = 0;
3300
3301        while (count) {
3302                unsigned long offset, length;
3303
3304                offset = offset_in_page(addr);
3305                length = PAGE_SIZE - offset;
3306                if (length > count)
3307                        length = count;
3308                p = vmalloc_to_page(addr);
3309                /*
3310                 * To access this _mapped_ area safely we would need a
3311                 * lock, but taking one here would add vmalloc()/vfree()
3312                 * overhead for this rarely used _debug_ interface.
3313                 * Instead, we use kmap_atomic() and accept a small
3314                 * overhead in this access function.
3315                 */
3316                if (p) {
3317                        /* We can expect USER0 is not used -- see vread() */
3318                        void *map = kmap_atomic(p);
3319                        memcpy(buf, map + offset, length);
3320                        kunmap_atomic(map);
3321                } else
3322                        memset(buf, 0, length);
3323
3324                addr += length;
3325                buf += length;
3326                copied += length;
3327                count -= length;
3328        }
3329        return copied;
3330}
3331
3332/**
3333 * vread() - read vmalloc area in a safe way.
3334 * @buf:     buffer for reading data
3335 * @addr:    vm address.
3336 * @count:   number of bytes to be read.
3337 *
3338 * This function checks that addr is a valid vmalloc'ed area, and
3339 * copies data from that area to a given buffer. If the given memory
3340 * range of [addr...addr+count) includes some valid address, data is
3341 * copied to the proper area of @buf. Memory holes are zero-filled,
3342 * and an IOREMAP area is treated as a memory hole: no copy is done.
3343 *
3344 * If [addr...addr+count) doesn't include any intersection with a live
3345 * vm_struct area, returns 0. @buf must be a kernel buffer.
3346 *
3347 * Note: In normal operation, vread() is never necessary because the
3348 * caller knows the vmalloc() area is valid and can use memcpy().
3349 * This is for routines which have to access a vmalloc area without
3350 * any prior information, such as /proc/kcore.
3351 *
3352 * Return: number of bytes for which addr and buf should be increased
3353 * (same number as @count) or %0 if [addr...addr+count) doesn't
3354 * include any intersection with valid vmalloc area
3355 */
3356long vread(char *buf, char *addr, unsigned long count)
3357{
3358        struct vmap_area *va;
3359        struct vm_struct *vm;
3360        char *vaddr, *buf_start = buf;
3361        unsigned long buflen = count;
3362        unsigned long n;
3363
3364        /* Don't allow overflow */
3365        if ((unsigned long) addr + count < count)
3366                count = -(unsigned long) addr;
3367
3368        spin_lock(&vmap_area_lock);
3369        va = find_vmap_area_exceed_addr((unsigned long)addr);
3370        if (!va)
3371                goto finished;
3372
3373        /* no intersection with any live vmap_area */
3374        if ((unsigned long)addr + count <= va->va_start)
3375                goto finished;
3376
3377        list_for_each_entry_from(va, &vmap_area_list, list) {
3378                if (!count)
3379                        break;
3380
3381                if (!va->vm)
3382                        continue;
3383
3384                vm = va->vm;
3385                vaddr = (char *) vm->addr;
3386                if (addr >= vaddr + get_vm_area_size(vm))
3387                        continue;
3388                while (addr < vaddr) {
3389                        if (count == 0)
3390                                goto finished;
3391                        *buf = '\0';
3392                        buf++;
3393                        addr++;
3394                        count--;
3395                }
3396                n = vaddr + get_vm_area_size(vm) - addr;
3397                if (n > count)
3398                        n = count;
3399                if (!(vm->flags & VM_IOREMAP))
3400                        aligned_vread(buf, addr, n);
3401                else /* IOREMAP area is treated as memory hole */
3402                        memset(buf, 0, n);
3403                buf += n;
3404                addr += n;
3405                count -= n;
3406        }
3407finished:
3408        spin_unlock(&vmap_area_lock);
3409
3410        if (buf == buf_start)
3411                return 0;
3412        /* zero-fill memory holes */
3413        if (buf != buf_start + buflen)
3414                memset(buf, 0, buflen - (buf - buf_start));
3415
3416        return buflen;
3417}
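
/*
 * Illustrative sketch (not part of mm/vmalloc.c): how a /proc/kcore
 * style reader might use vread(). The target address is not known to be
 * backed by a live mapping, so memcpy() would be unsafe; vread()
 * validates the range and zero-fills any holes instead of faulting.
 */
static long example_dump_vmalloc_range(char __user *ubuf, char *vaddr,
				       unsigned long count)
{
	char *kbuf = vmalloc(count);
	long ret;

	if (!kbuf)
		return -ENOMEM;
	ret = vread(kbuf, vaddr, count);
	if (ret > 0 && copy_to_user(ubuf, kbuf, ret))
		ret = -EFAULT;
	vfree(kbuf);
	return ret;
}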
3418
3419/**
3420 * remap_vmalloc_range_partial - map vmalloc pages to userspace
3421 * @vma:                vma to cover
3422 * @uaddr:              target user address to start at
3423 * @kaddr:              virtual address of vmalloc kernel memory
3424 * @pgoff:              offset from @kaddr to start at
3425 * @size:               size of map area
3426 *
3427 * Returns:     0 for success, -Exxx on failure
3428 *
3429 * This function checks that @kaddr is a valid vmalloc'ed area,
3430 * and that it is big enough to cover the range starting at
3431 * @uaddr in @vma. It returns failure if those criteria aren't
3432 * met.
3433 *
3434 * Similar to remap_pfn_range() (see mm/memory.c)
3435 */
3436int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3437                                void *kaddr, unsigned long pgoff,
3438                                unsigned long size)
3439{
3440        struct vm_struct *area;
3441        unsigned long off;
3442        unsigned long end_index;
3443
3444        if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3445                return -EINVAL;
3446
3447        size = PAGE_ALIGN(size);
3448
3449        if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3450                return -EINVAL;
3451
3452        area = find_vm_area(kaddr);
3453        if (!area)
3454                return -EINVAL;
3455
3456        if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3457                return -EINVAL;
3458
3459        if (check_add_overflow(size, off, &end_index) ||
3460            end_index > get_vm_area_size(area))
3461                return -EINVAL;
3462        kaddr += off;
3463
3464        do {
3465                struct page *page = vmalloc_to_page(kaddr);
3466                int ret;
3467
3468                ret = vm_insert_page(vma, uaddr, page);
3469                if (ret)
3470                        return ret;
3471
3472                uaddr += PAGE_SIZE;
3473                kaddr += PAGE_SIZE;
3474                size -= PAGE_SIZE;
3475        } while (size > 0);
3476
3477        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3478
3479        return 0;
3480}
3481
3482/**
3483 * remap_vmalloc_range - map vmalloc pages to userspace
3484 * @vma:                vma to cover (map full range of vma)
3485 * @addr:               vmalloc memory
3486 * @pgoff:              number of pages into addr before first page to map
3487 *
3488 * Returns:     0 for success, -Exxx on failure
3489 *
3490 * This function checks that addr is a valid vmalloc'ed area, and
3491 * that it is big enough to cover the vma. It returns failure if
3492 * those criteria aren't met.
3493 *
3494 * Similar to remap_pfn_range() (see mm/memory.c)
3495 */
3496int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3497                                                unsigned long pgoff)
3498{
3499        return remap_vmalloc_range_partial(vma, vma->vm_start,
3500                                           addr, pgoff,
3501                                           vma->vm_end - vma->vm_start);
3502}
3503EXPORT_SYMBOL(remap_vmalloc_range);
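
/*
 * Illustrative sketch (not part of mm/vmalloc.c): the canonical pairing
 * of vmalloc_user() with remap_vmalloc_range() in a driver's mmap
 * handler. vmalloc_user() sets VM_USERMAP and zeroes the buffer, which
 * is exactly what remap_vmalloc_range_partial() checks for above.
 * example_shared_buf is a hypothetical buffer set up elsewhere.
 */
static void *example_shared_buf;

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!example_shared_buf)
		return -ENODEV;
	return remap_vmalloc_range(vma, example_shared_buf, vma->vm_pgoff);
}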
3504
3505void free_vm_area(struct vm_struct *area)
3506{
3507        struct vm_struct *ret;
3508        ret = remove_vm_area(area->addr);
3509        BUG_ON(ret != area);
3510        kfree(area);
3511}
3512EXPORT_SYMBOL_GPL(free_vm_area);
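
/*
 * Illustrative sketch (not part of mm/vmalloc.c): free_vm_area() is the
 * counterpart of get_vm_area() for callers that reserve kernel VA space
 * and manage the page table entries themselves, rather than allocating
 * through vmalloc().
 */
static void example_reserve_and_release(void)
{
	struct vm_struct *area = get_vm_area(PAGE_SIZE, VM_IOREMAP);

	if (!area)
		return;
	/* ... map and later unmap pages within [addr, addr + size) ... */
	free_vm_area(area);
}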
3513
3514#ifdef CONFIG_SMP
3515static struct vmap_area *node_to_va(struct rb_node *n)
3516{
3517        return rb_entry_safe(n, struct vmap_area, rb_node);
3518}
3519
3520/**
3521 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3522 * @addr: target address
3523 *
3524 * Returns: the vmap_area if it is found. If there is no such area,
3525 *   the highest vmap_area below @addr is returned instead, i.e.
3526 *   one with va->va_start < addr && va->va_end < addr, or NULL
3527 *   if there are no areas at all before @addr.
3528 */
3529static struct vmap_area *
3530pvm_find_va_enclose_addr(unsigned long addr)
3531{
3532        struct vmap_area *va, *tmp;
3533        struct rb_node *n;
3534
3535        n = free_vmap_area_root.rb_node;
3536        va = NULL;
3537
3538        while (n) {
3539                tmp = rb_entry(n, struct vmap_area, rb_node);
3540                if (tmp->va_start <= addr) {
3541                        va = tmp;
3542                        if (tmp->va_end >= addr)
3543                                break;
3544
3545                        n = n->rb_right;
3546                } else {
3547                        n = n->rb_left;
3548                }
3549        }
3550
3551        return va;
3552}
3553
3554/**
3555 * pvm_determine_end_from_reverse - find the highest aligned address
3556 * of a free block below VMALLOC_END
3557 * @va:
3558 *   in - the VA where we start the search (in reverse order);
3559 *   out - the VA with the highest aligned end address.
3560 * @align: alignment required for the highest address
3561 *
3562 * Returns: determined end address within vmap_area
3563 */
3564static unsigned long
3565pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3566{
3567        unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3568        unsigned long addr;
3569
3570        if (likely(*va)) {
3571                list_for_each_entry_from_reverse((*va),
3572                                &free_vmap_area_list, list) {
3573                        addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3574                        if ((*va)->va_start < addr)
3575                                return addr;
3576                }
3577        }
3578
3579        return 0;
3580}
3581
3582/**
3583 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3584 * @offsets: array containing offset of each area
3585 * @sizes: array containing size of each area
3586 * @nr_vms: the number of areas to allocate
3587 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3588 *
3589 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3590 *          vm_structs on success, %NULL on failure
3591 *
3592 * Percpu allocator wants to use congruent vm areas so that it can
3593 * maintain the offsets among percpu areas.  This function allocates
3594 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
3595 * be scattered pretty far, distance between two areas easily going up
3596 * to gigabytes.  To avoid interacting with regular vmallocs, these
3597 * areas are allocated from top.
3598 *
3599 * Despite its complicated look, this allocator is rather simple. It
3600 * does everything top-down and scans free blocks from the end looking
3601 * for a matching base. While scanning, if any of the areas does not
3602 * fit, the base address is pulled down to fit that area. Scanning is
3603 * repeated until all the areas fit; then all necessary data structures
3604 * are inserted and the result is returned.
3605 */
3606struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3607                                     const size_t *sizes, int nr_vms,
3608                                     size_t align)
3609{
3610        const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3611        const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3612        struct vmap_area **vas, *va;
3613        struct vm_struct **vms;
3614        int area, area2, last_area, term_area;
3615        unsigned long base, start, size, end, last_end, orig_start, orig_end;
3616        bool purged = false;
3617        enum fit_type type;
3618
3619        /* verify parameters and allocate data structures */
3620        BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3621        for (last_area = 0, area = 0; area < nr_vms; area++) {
3622                start = offsets[area];
3623                end = start + sizes[area];
3624
3625                /* is everything aligned properly? */
3626                BUG_ON(!IS_ALIGNED(offsets[area], align));
3627                BUG_ON(!IS_ALIGNED(sizes[area], align));
3628
3629                /* detect the area with the highest address */
3630                if (start > offsets[last_area])
3631                        last_area = area;
3632
3633                for (area2 = area + 1; area2 < nr_vms; area2++) {
3634                        unsigned long start2 = offsets[area2];
3635                        unsigned long end2 = start2 + sizes[area2];
3636
3637                        BUG_ON(start2 < end && start < end2);
3638                }
3639        }
3640        last_end = offsets[last_area] + sizes[last_area];
3641
3642        if (vmalloc_end - vmalloc_start < last_end) {
3643                WARN_ON(true);
3644                return NULL;
3645        }
3646
3647        vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3648        vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3649        if (!vas || !vms)
3650                goto err_free2;
3651
3652        for (area = 0; area < nr_vms; area++) {
3653                vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3654                vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3655                if (!vas[area] || !vms[area])
3656                        goto err_free;
3657        }
3658retry:
3659        spin_lock(&free_vmap_area_lock);
3660
3661        /* start scanning - we scan from the top, begin with the last area */
3662        area = term_area = last_area;
3663        start = offsets[area];
3664        end = start + sizes[area];
3665
3666        va = pvm_find_va_enclose_addr(vmalloc_end);
3667        base = pvm_determine_end_from_reverse(&va, align) - end;
3668
3669        while (true) {
3670                /*
3671                 * base might have underflowed, add last_end before
3672                 * comparing.
3673                 */
3674                if (base + last_end < vmalloc_start + last_end)
3675                        goto overflow;
3676
3677                /*
3678                 * Fitting base has not been found.
3679                 */
3680                if (va == NULL)
3681                        goto overflow;
3682
3683                /*
3684                 * If required width exceeds current VA block, move
3685                 * base downwards and then recheck.
3686                 */
3687                if (base + end > va->va_end) {
3688                        base = pvm_determine_end_from_reverse(&va, align) - end;
3689                        term_area = area;
3690                        continue;
3691                }
3692
3693                /*
3694                 * If this VA does not fit, move base downwards and recheck.
3695                 */
3696                if (base + start < va->va_start) {
3697                        va = node_to_va(rb_prev(&va->rb_node));
3698                        base = pvm_determine_end_from_reverse(&va, align) - end;
3699                        term_area = area;
3700                        continue;
3701                }
3702
3703                /*
3704                 * This area fits, move on to the previous one.  If
3705                 * the previous one is the terminal one, we're done.
3706                 */
3707                area = (area + nr_vms - 1) % nr_vms;
3708                if (area == term_area)
3709                        break;
3710
3711                start = offsets[area];
3712                end = start + sizes[area];
3713                va = pvm_find_va_enclose_addr(base + end);
3714        }
3715
3716        /* we've found a fitting base, insert all va's */
3717        for (area = 0; area < nr_vms; area++) {
3718                int ret;
3719
3720                start = base + offsets[area];
3721                size = sizes[area];
3722
3723                va = pvm_find_va_enclose_addr(start);
3724                if (WARN_ON_ONCE(va == NULL))
3725                        /* It is a BUG(), but trigger recovery instead. */
3726                        goto recovery;
3727
3728                type = classify_va_fit_type(va, start, size);
3729                if (WARN_ON_ONCE(type == NOTHING_FIT))
3730                        /* It is a BUG(), but trigger recovery instead. */
3731                        goto recovery;
3732
3733                ret = adjust_va_to_fit_type(va, start, size, type);
3734                if (unlikely(ret))
3735                        goto recovery;
3736
3737                /* Allocated area. */
3738                va = vas[area];
3739                va->va_start = start;
3740                va->va_end = start + size;
3741        }
3742
3743        spin_unlock(&free_vmap_area_lock);
3744
3745        /* populate the kasan shadow space */
3746        for (area = 0; area < nr_vms; area++) {
3747                if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3748                        goto err_free_shadow;
3749
3750                kasan_unpoison_vmalloc((void *)vas[area]->va_start,
3751                                       sizes[area]);
3752        }
3753
3754        /* insert all vm's */
3755        spin_lock(&vmap_area_lock);
3756        for (area = 0; area < nr_vms; area++) {
3757                insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3758
3759                setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3760                                 pcpu_get_vm_areas);
3761        }
3762        spin_unlock(&vmap_area_lock);
3763
3764        kfree(vas);
3765        return vms;
3766
3767recovery:
3768        /*
3769         * Remove previously allocated areas. There is no
3770         * need to remove these areas from the busy tree,
3771         * because they are inserted there only on the final
3772         * step, and only when pcpu_get_vm_areas() succeeds.
3773         */
3774        while (area--) {
3775                orig_start = vas[area]->va_start;
3776                orig_end = vas[area]->va_end;
3777                va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3778                                &free_vmap_area_list);
3779                if (va)
3780                        kasan_release_vmalloc(orig_start, orig_end,
3781                                va->va_start, va->va_end);
3782                vas[area] = NULL;
3783        }
3784
3785overflow:
3786        spin_unlock(&free_vmap_area_lock);
3787        if (!purged) {
3788                purge_vmap_area_lazy();
3789                purged = true;
3790
3791                /* Before "retry", check if we recover. */
3792                for (area = 0; area < nr_vms; area++) {
3793                        if (vas[area])
3794                                continue;
3795
3796                        vas[area] = kmem_cache_zalloc(
3797                                vmap_area_cachep, GFP_KERNEL);
3798                        if (!vas[area])
3799                                goto err_free;
3800                }
3801
3802                goto retry;
3803        }
3804
3805err_free:
3806        for (area = 0; area < nr_vms; area++) {
3807                if (vas[area])
3808                        kmem_cache_free(vmap_area_cachep, vas[area]);
3809
3810                kfree(vms[area]);
3811        }
3812err_free2:
3813        kfree(vas);
3814        kfree(vms);
3815        return NULL;
3816
3817err_free_shadow:
3818        spin_lock(&free_vmap_area_lock);
3819        /*
3820         * We release all the vmalloc shadows, even the ones for regions that
3821         * hadn't been successfully added. This relies on kasan_release_vmalloc
3822         * being able to tolerate this case.
3823         */
3824        for (area = 0; area < nr_vms; area++) {
3825                orig_start = vas[area]->va_start;
3826                orig_end = vas[area]->va_end;
3827                va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3828                                &free_vmap_area_list);
3829                if (va)
3830                        kasan_release_vmalloc(orig_start, orig_end,
3831                                va->va_start, va->va_end);
3832                vas[area] = NULL;
3833                kfree(vms[area]);
3834        }
3835        spin_unlock(&free_vmap_area_lock);
3836        kfree(vas);
3837        kfree(vms);
3838        return NULL;
3839}
3840
3841/**
3842 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3843 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3844 * @nr_vms: the number of allocated areas
3845 *
3846 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3847 */
3848void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3849{
3850        int i;
3851
3852        for (i = 0; i < nr_vms; i++)
3853                free_vm_area(vms[i]);
3854        kfree(vms);
3855}
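
/*
 * Illustrative sketch (not part of mm/vmalloc.c): the percpu allocator
 * is the only intended caller of the two functions above. A simplified
 * request for two congruent 64K areas placed 1M apart (layout values
 * are hypothetical; SZ_* come from <linux/sizes.h>):
 */
static void example_pcpu_vm_areas(void)
{
	const unsigned long offsets[2] = { 0, SZ_1M };
	const size_t sizes[2] = { SZ_64K, SZ_64K };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
	if (vms)
		pcpu_free_vm_areas(vms, 2);
}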
3856#endif  /* CONFIG_SMP */
3857
3858#ifdef CONFIG_PRINTK
3859bool vmalloc_dump_obj(void *object)
3860{
3861        struct vm_struct *vm;
3862        void *objp = (void *)PAGE_ALIGN((unsigned long)object);
3863
3864        vm = find_vm_area(objp);
3865        if (!vm)
3866                return false;
3867        pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
3868                vm->nr_pages, (unsigned long)vm->addr, vm->caller);
3869        return true;
3870}
3871#endif
3872
3873#ifdef CONFIG_PROC_FS
3874static void *s_start(struct seq_file *m, loff_t *pos)
3875        __acquires(&vmap_purge_lock)
3876        __acquires(&vmap_area_lock)
3877{
3878        mutex_lock(&vmap_purge_lock);
3879        spin_lock(&vmap_area_lock);
3880
3881        return seq_list_start(&vmap_area_list, *pos);
3882}
3883
3884static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3885{
3886        return seq_list_next(p, &vmap_area_list, pos);
3887}
3888
3889static void s_stop(struct seq_file *m, void *p)
3890        __releases(&vmap_area_lock)
3891        __releases(&vmap_purge_lock)
3892{
3893        spin_unlock(&vmap_area_lock);
3894        mutex_unlock(&vmap_purge_lock);
3895}
3896
3897static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3898{
3899        if (IS_ENABLED(CONFIG_NUMA)) {
3900                unsigned int nr, *counters = m->private;
3901                unsigned int step = 1U << vm_area_page_order(v);
3902
3903                if (!counters)
3904                        return;
3905
3906                if (v->flags & VM_UNINITIALIZED)
3907                        return;
3908                /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3909                smp_rmb();
3910
3911                memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3912
3913                for (nr = 0; nr < v->nr_pages; nr += step)
3914                        counters[page_to_nid(v->pages[nr])] += step;
3915                for_each_node_state(nr, N_HIGH_MEMORY)
3916                        if (counters[nr])
3917                                seq_printf(m, " N%u=%u", nr, counters[nr]);
3918        }
3919}
3920
3921static void show_purge_info(struct seq_file *m)
3922{
3923        struct vmap_area *va;
3924
3925        spin_lock(&purge_vmap_area_lock);
3926        list_for_each_entry(va, &purge_vmap_area_list, list) {
3927                seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3928                        (void *)va->va_start, (void *)va->va_end,
3929                        va->va_end - va->va_start);
3930        }
3931        spin_unlock(&purge_vmap_area_lock);
3932}
3933
3934static int s_show(struct seq_file *m, void *p)
3935{
3936        struct vmap_area *va;
3937        struct vm_struct *v;
3938
3939        va = list_entry(p, struct vmap_area, list);
3940
3941        /*
3942         * s_show can race with remove_vm_area(): a NULL vm means the
3943         * vmap area is being torn down or is a vm_map_ram allocation.
3944         */
3945        if (!va->vm) {
3946                seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
3947                        (void *)va->va_start, (void *)va->va_end,
3948                        va->va_end - va->va_start);
3949
3950                goto final;
3951        }
3952
3953        v = va->vm;
3954
3955        seq_printf(m, "0x%pK-0x%pK %7ld",
3956                v->addr, v->addr + v->size, v->size);
3957
3958        if (v->caller)
3959                seq_printf(m, " %pS", v->caller);
3960
3961        if (v->nr_pages)
3962                seq_printf(m, " pages=%d", v->nr_pages);
3963
3964        if (v->phys_addr)
3965                seq_printf(m, " phys=%pa", &v->phys_addr);
3966
3967        if (v->flags & VM_IOREMAP)
3968                seq_puts(m, " ioremap");
3969
3970        if (v->flags & VM_ALLOC)
3971                seq_puts(m, " vmalloc");
3972
3973        if (v->flags & VM_MAP)
3974                seq_puts(m, " vmap");
3975
3976        if (v->flags & VM_USERMAP)
3977                seq_puts(m, " user");
3978
3979        if (v->flags & VM_DMA_COHERENT)
3980                seq_puts(m, " dma-coherent");
3981
3982        if (is_vmalloc_addr(v->pages))
3983                seq_puts(m, " vpages");
3984
3985        show_numa_info(m, v);
3986        seq_putc(m, '\n');
3987
3988        /*
3989         * As a final step, dump "unpurged" areas.
3990         */
3991final:
3992        if (list_is_last(&va->list, &vmap_area_list))
3993                show_purge_info(m);
3994
3995        return 0;
3996}
3997
3998static const struct seq_operations vmalloc_op = {
3999        .start = s_start,
4000        .next = s_next,
4001        .stop = s_stop,
4002        .show = s_show,
4003};
4004
4005static int __init proc_vmalloc_init(void)
4006{
4007        if (IS_ENABLED(CONFIG_NUMA))
4008                proc_create_seq_private("vmallocinfo", 0400, NULL,
4009                                &vmalloc_op,
4010                                nr_node_ids * sizeof(unsigned int), NULL);
4011        else
4012                proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
4013        return 0;
4014}
4015module_init(proc_vmalloc_init);
4016
4017#endif
4018