linux/mm/page_ext.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate.h>
#include <linux/pgalloc_tag.h>

/*
 * struct page extension
 *
 * This feature manages memory for extended data per page.
 *
 * Without it, storing extra per-page data means modifying struct page
 * itself, which requires rebuilding the kernel, a really time-consuming
 * process. Sometimes a rebuild is impossible because of third-party module
 * dependencies, and enlarging struct page can cause unwanted changes in
 * system behaviour.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended per-page data in a separate place rather
 * than in struct page itself, and that memory is accessed through the
 * accessor functions provided by this code. During boot it checks whether
 * the allocation of this huge chunk of memory is needed at all; if not, it
 * avoids allocating memory entirely. Thanks to this, the feature can be
 * included in the kernel by default, avoiding rebuilds and the problems
 * related to them.
 *
 * To make this work, there are two callbacks for clients. One is the need
 * callback, which is mandatory if the user wants to avoid useless memory
 * allocation at boot time. The other, the init callback, is optional and
 * is used to perform proper initialization after memory is allocated.
 *
 * The need callback decides whether the extended memory allocation is
 * needed. Sometimes users deactivate certain features for a given boot,
 * making the extra memory unnecessary. To avoid allocating a huge chunk of
 * memory in that case, each client reports its need for extra memory
 * through the need callback. If any of the need callbacks returns true,
 * someone needs extra memory and the page extension core allocates memory
 * for page extension. If none of the need callbacks return true, no memory
 * is needed for this boot and the page extension core skips the allocation.
 * As a result, no memory is wasted.
 *
 * When a need callback returns true, page_ext checks whether extra memory
 * was requested through the size field in struct page_ext_operations. If
 * it is non-zero, extra space is allocated in each page_ext entry and the
 * offset is returned to the client through the offset field in
 * struct page_ext_operations.
 *
 * The init callback performs proper initialization once page extension is
 * completely initialized. On sparse memory systems, the extra memory is
 * allocated some time after the memmap, i.e. the lifetime of the page
 * extension memory is not the same as that of the memmap for struct page.
 * Clients therefore cannot store extra data until page extension is
 * initialized, even though pages may already be allocated and freely used.
 * This could leave the per-page extra data in an inadequate state, so a
 * client can use this callback to initialize that state correctly.
 */
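/*
 * A minimal sketch (not part of this file) of how a hypothetical client
 * could hook into the two callbacks described above. The names
 * "example_ops", "need_example", "init_example", "example_enabled" and
 * "struct example_data" are illustrative only; real clients such as
 * page_owner register their page_ext_operations in the page_ext_ops[]
 * table below.
 *
 *	struct example_data {
 *		unsigned long value;
 *	};
 *
 *	static bool need_example(void)
 *	{
 *		// Only ask for per-page memory when the feature was
 *		// actually enabled for this boot.
 *		return example_enabled;
 *	}
 *
 *	static void init_example(void)
 *	{
 *		// Called once page_ext is fully set up; only from here on
 *		// is it safe to start recording per-page data.
 *	}
 *
 *	struct page_ext_operations example_ops = {
 *		.size = sizeof(struct example_data),
 *		.need = need_example,
 *		.init = init_example,
 *	};
 *
 * After invoke_need_callbacks() runs, example_ops.offset holds the offset
 * of struct example_data inside each page_ext entry, so the client's data
 * can be reached as (void *)page_ext + example_ops.offset.
 */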

#ifdef CONFIG_SPARSEMEM
#define PAGE_EXT_INVALID	(0x1)
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
	return true;
}
static struct page_ext_operations page_idle_ops __initdata = {
	.need = need_page_idle,
	.need_shared_flags = true,
};
#endif

static struct page_ext_operations *page_ext_ops[] __initdata = {
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
	&page_alloc_tagging_ops,
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
	&page_table_check_ops,
#endif
};

unsigned long page_ext_size;

static unsigned long total_usage;

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
/*
 * To ensure correct allocation tagging for pages, page_ext should be available
 * before the first page allocation. Otherwise early task stacks will be
 * allocated before page_ext initialization and missing tags will be flagged.
 */
bool early_page_ext __meminitdata = true;
#else
bool early_page_ext __meminitdata;
#endif
static int __init setup_early_page_ext(char *str)
{
	early_page_ext = true;
	return 0;
}
early_param("early_page_ext", setup_early_page_ext);
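/*
 * Usage note (illustrative, not part of this file): adding the
 * "early_page_ext" parameter to the kernel command line sets the
 * early_page_ext flag above, which the early init code consults in order
 * to bring page_ext up earlier during boot, e.g.:
 *
 *	... root=/dev/sda1 early_page_ext ...
 */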

static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need()) {
			if (page_ext_ops[i]->need_shared_flags) {
				page_ext_size = sizeof(struct page_ext);
				break;
			}
		}
	}

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = page_ext_size;
			page_ext_size += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}
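/*
 * Worked example (hypothetical sizes): suppose two clients need memory,
 * the first with need_shared_flags = true and size = 8, the second with
 * size = 16. The first pass sets page_ext_size = sizeof(struct page_ext);
 * the second pass then assigns offset = sizeof(struct page_ext) to the
 * first client and offset = sizeof(struct page_ext) + 8 to the second,
 * leaving page_ext_size = sizeof(struct page_ext) + 24 as the per-page
 * stride used by get_entry() below.
 */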

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + page_ext_size * index;
}

#ifndef CONFIG_SPARSEMEM
void __init page_ext_init_flatmem_late(void)
{
	invoke_init_callbacks();
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

static struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	WARN_ON_ONCE(!rcu_read_lock_held());
	base = NODE_DATA(page_to_nid(page))->node_page_ext;
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if node range is not aligned with
	 * MAX_ORDER_NR_PAGES. When page allocator's buddy algorithm
	 * checks buddy's status, range could be out of exact node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = page_ext_size * nr_pages;

	base = memblock_alloc_try_nid(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	memmap_boot_pages_add(DIV_ROUND_UP(table_size, PAGE_SIZE));
	return 0;
}

void __init page_ext_init_flatmem(void)
{

	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid)  {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */
static bool page_ext_invalid(struct page_ext *page_ext)
{
	return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
}

static struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_ext *page_ext = READ_ONCE(section->page_ext);

	WARN_ON_ONCE(!rcu_read_lock_held());
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (page_ext_invalid(page_ext))
		return NULL;
	return get_entry(page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr)
		kmemleak_alloc(addr, size, 1, flags);
	else
		addr = vzalloc_node(size, nid);

	if (addr)
		memmap_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = page_ext_size * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION.  For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - page_ext_size * pfn;
	total_usage += table_size;
	return 0;
}
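/*
 * Illustration of the encoding above (not additional code): because
 * section->page_ext stores base minus page_ext_size * section_aligned_pfn
 * (the pfn after masking with PAGE_SECTION_MASK), lookup_page_ext() can
 * index with the absolute pfn:
 *
 *	get_entry(section->page_ext, pfn)
 *		== base + page_ext_size * (pfn - section_aligned_pfn)
 *
 * i.e. the entry for the first pfn of the section sits at base, and each
 * following pfn advances by page_ext_size bytes.
 */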

static void free_page_ext(void *addr)
{
	size_t table_size;
	struct page *page;

	table_size = page_ext_size * PAGES_PER_SECTION;
	memmap_pages_add(-1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));

	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		page = virt_to_page(addr);
		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;

	base = READ_ONCE(ms->page_ext);
	/*
	 * page_ext here can be valid while doing the roll back
	 * operation in online_page_ext().
	 */
	if (page_ext_invalid(base))
		base = (void *)base - PAGE_EXT_INVALID;
	WRITE_ONCE(ms->page_ext, NULL);

	base = get_entry(base, pfn);
	free_page_ext(base);
}

static void __invalidate_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	void *val;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	val = (void *)ms->page_ext + PAGE_EXT_INVALID;
	WRITE_ONCE(ms->page_ext, val);
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages)
{
	int nid = pfn_to_nid(start_pfn);
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
		fail = init_section_page_ext(pfn, nid);
	if (!fail)
		return 0;

	/* rollback */
	end = pfn - PAGES_PER_SECTION;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static void __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	/*
	 * Freeing of page_ext is done in 3 steps to avoid
	 * use-after-free of it:
	 * 1) Traverse all the sections and mark their page_ext
	 *    as invalid.
	 * 2) Wait for all the existing users of page_ext who
	 *    started before invalidation to finish.
	 * 3) Free the page_ext.
	 */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__invalidate_page_ext(pfn);

	synchronize_rcu();

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
}
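/*
 * Sketch of why step 2 above is sufficient, assuming the reader pattern of
 * page_ext_get()/page_ext_put() further below: readers dereference
 * section->page_ext under rcu_read_lock(), so a reader that fetched the
 * old pointer before __invalidate_page_ext() ran is still inside its RCU
 * read-side critical section. synchronize_rcu() waits for every such
 * reader to reach page_ext_put() (rcu_read_unlock()) before step 3 frees
 * the memory; readers that start afterwards see the tagged pointer, fail
 * the page_ext_invalid() check, and get NULL instead.
 */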

static int __meminit page_ext_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn, mn->nr_pages);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out-of-node pages are not initialized.  So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap. Some architectures are
			 * known to have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
			cond_resched();
		}
	}
	hotplug_memory_notifier(page_ext_callback, DEFAULT_CALLBACK_PRI);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif

/**
 * page_ext_lookup() - Look up the page extension for a PFN.
 * @pfn: PFN of the page we're interested in.
 *
 * Must be called with the RCU read lock taken, and @pfn must be valid.
 *
 * Return: NULL if no page_ext exists for this page.
 */
struct page_ext *page_ext_lookup(unsigned long pfn)
{
	return lookup_page_ext(pfn_to_page(pfn));
}

/**
 * page_ext_get() - Get the extended information for a page.
 * @page: The page we're interested in.
 *
 * Ensures that the page_ext will remain valid until page_ext_put()
 * is called.
 *
 * Return: NULL if no page_ext exists for this page.
 * Context: Any context.  Caller may not sleep until they have called
 * page_ext_put().
 */
struct page_ext *page_ext_get(const struct page *page)
{
	struct page_ext *page_ext;

	rcu_read_lock();
	page_ext = lookup_page_ext(page);
	if (!page_ext) {
		rcu_read_unlock();
		return NULL;
	}

	return page_ext;
}

/**
 * page_ext_put() - Done working with a page's extended information.
 * @page_ext: Page extended information received from page_ext_get().
 *
 * The page extended information of the page may not be valid after this
 * function is called.
 *
 * Return: None.
 * Context: Any context in which the matching page_ext_get() was called.
 */
void page_ext_put(struct page_ext *page_ext)
{
	if (unlikely(!page_ext))
		return;

	rcu_read_unlock();
}
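
/*
 * Typical usage (illustrative sketch, not part of this file): a client
 * reads or updates its per-page data between page_ext_get() and
 * page_ext_put(), without sleeping in between. "example_ops" and
 * "struct example_data" refer to the hypothetical client sketched in the
 * comment near the top of the file.
 *
 *	struct page_ext *page_ext = page_ext_get(page);
 *	struct example_data *data;
 *
 *	if (unlikely(!page_ext))
 *		return;				// no page_ext for this page (yet)
 *
 *	data = (void *)page_ext + example_ops.offset;
 *	data->value++;				// must not sleep until page_ext_put()
 *
 *	page_ext_put(page_ext);
 */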