linux/mm/page_ext.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>

/*
 * struct page extension
 *
 * This feature manages memory for extended data per page.
 *
 * Previously, storing extra data per page meant modifying struct page
 * itself, which requires rebuilding the kernel: a time-consuming process
 * that is sometimes impossible due to third-party module dependencies.
 * On top of that, enlarging struct page can cause unwanted changes in
 * system behaviour.
 *
 * This feature is intended to overcome those problems. It allocates the
 * memory for extended per-page data in a separate place rather than in
 * struct page itself, and that memory is accessed through the accessor
 * functions provided by this code. During boot, the core checks whether
 * the (potentially huge) allocation is needed at all and skips it if it
 * is not. This lets the feature be built into the kernel by default
 * while avoiding both rebuilds and wasted memory.
 *
 * To make this work, clients provide two callbacks. The need callback is
 * mandatory if a client wants to avoid useless memory allocation at boot
 * time. The init callback is optional and is used to perform proper
 * initialization after the memory has been allocated.
 *
 * The need callback decides whether the extended memory allocation is
 * required. A user may deactivate some features for a given boot, making
 * the extra memory unnecessary, so to avoid allocating a huge chunk in
 * that case each client expresses its need for extra memory through the
 * need callback. If any need callback returns true, someone needs the
 * extra memory and the page extension core allocates it. If none returns
 * true, the memory is not needed for this boot and the core skips the
 * allocation entirely, so no memory is wasted.
 *
 * When a need callback returns true, page_ext checks whether the client
 * requested extra space through the size field in struct
 * page_ext_operations. If it is non-zero, that much extra space is
 * reserved in every page_ext entry and the resulting byte offset is
 * handed back to the client through the offset field of struct
 * page_ext_operations.
 *
 * The init callback is used to perform proper initialization once page
 * extension is completely set up. On sparse memory systems the extra
 * memory is allocated some time later than the memmap, i.e. the lifetime
 * of page extension memory differs from that of the memmap for struct
 * page. Clients therefore cannot store extra data until page extension
 * is initialized, even though pages may already be allocated and freely
 * used, which could leave the per-page extra data in an inconsistent
 * state. To prevent that, a client can use this callback to set the
 * state up correctly.
 */
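
/*
 * Illustrative sketch (not part of this file): a hypothetical client,
 * "page_foo", that reserves two extra bytes per page and seeds them once
 * page_ext is fully set up. All names here (page_foo_ops, need_page_foo,
 * init_page_foo, page_foo_enabled) are invented for this example.
 *
 *	static bool page_foo_enabled = true;
 *
 *	static bool need_page_foo(void)
 *	{
 *		// Returning false makes the core skip the allocation.
 *		return page_foo_enabled;
 *	}
 *
 *	static void init_page_foo(void)
 *	{
 *		// page_ext memory exists from here on; safe to seed state.
 *	}
 *
 *	struct page_ext_operations page_foo_ops = {
 *		.size = 2,	// extra bytes in every page_ext entry
 *		.need = need_page_foo,
 *		.init = init_page_foo,
 *	};
 *
 * A real client would also be listed in page_ext_ops[] below. After
 * invoke_need_callbacks() runs, page_foo_ops.offset holds the byte offset
 * of this client's data inside each entry, so the data lives at
 * (void *)lookup_page_ext(page) + page_foo_ops.offset.
 */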

#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
        return true;
}
struct page_ext_operations page_idle_ops = {
        .need = need_page_idle,
};
#endif

static struct page_ext_operations *page_ext_ops[] = {
#ifdef CONFIG_PAGE_OWNER
        &page_owner_ops,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
        &page_idle_ops,
#endif
};

unsigned long page_ext_size = sizeof(struct page_ext);

static unsigned long total_usage;

static bool __init invoke_need_callbacks(void)
{
        int i;
        int entries = ARRAY_SIZE(page_ext_ops);
        bool need = false;

        for (i = 0; i < entries; i++) {
                if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
                        page_ext_ops[i]->offset = page_ext_size;
                        page_ext_size += page_ext_ops[i]->size;
                        need = true;
                }
        }

        return need;
}
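
/*
 * Worked example with hypothetical sizes: given two clients whose ->size
 * fields are 2 and 8, invoke_need_callbacks() leaves
 *
 *	client A: offset == sizeof(struct page_ext),     size == 2
 *	client B: offset == sizeof(struct page_ext) + 2, size == 8
 *	page_ext_size == sizeof(struct page_ext) + 10
 *
 * so consecutive entries in a page_ext table are page_ext_size bytes
 * apart, which is exactly the stride get_entry() below relies on.
 */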

static void __init invoke_init_callbacks(void)
{
        int i;
        int entries = ARRAY_SIZE(page_ext_ops);

        for (i = 0; i < entries; i++) {
                if (page_ext_ops[i]->init)
                        page_ext_ops[i]->init();
        }
}

#ifndef CONFIG_SPARSEMEM
void __init page_ext_init_flatmem_late(void)
{
        invoke_init_callbacks();
}
#endif

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
        return base + page_ext_size * index;
}

#ifndef CONFIG_SPARSEMEM

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
        pgdat->node_page_ext = NULL;
}

struct page_ext *lookup_page_ext(const struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        unsigned long index;
        struct page_ext *base;

        base = NODE_DATA(page_to_nid(page))->node_page_ext;
        /*
         * The sanity checks the page allocator does upon freeing a
         * page can reach here before the page_ext arrays are
         * allocated when feeding a range of pages to the allocator
         * for the first time during bootup or memory hotplug.
         */
        if (unlikely(!base))
                return NULL;
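        /*
         * The table was sized from the node start pfn rounded down to
         * MAX_ORDER_NR_PAGES (see alloc_node_page_ext() below), so index
         * it relative to that rounded-down base, not node_start_pfn
         * itself.
         */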
        index = pfn - round_down(node_start_pfn(page_to_nid(page)),
                                        MAX_ORDER_NR_PAGES);
        return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
        struct page_ext *base;
        unsigned long table_size;
        unsigned long nr_pages;

        nr_pages = NODE_DATA(nid)->node_spanned_pages;
        if (!nr_pages)
                return 0;

        /*
         * Need extra space if the node range is not aligned to
         * MAX_ORDER_NR_PAGES: when the page allocator's buddy algorithm
         * checks a buddy's status, it can step slightly outside the
         * node's exact pfn range.
         */
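        /*
         * Hypothetical example: with MAX_ORDER_NR_PAGES == 1024, a node
         * spanning pfns [0, 5000) ends mid-block, so 5000 + 1024 entries
         * are reserved and buddy checks within the last, partially
         * covered MAX_ORDER block, [4096, 5120), stay inside the table.
         */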
        if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
                !IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
                nr_pages += MAX_ORDER_NR_PAGES;

        table_size = page_ext_size * nr_pages;

        base = memblock_alloc_try_nid(
                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                        MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        if (!base)
                return -ENOMEM;
        NODE_DATA(nid)->node_page_ext = base;
        total_usage += table_size;
        return 0;
}

void __init page_ext_init_flatmem(void)
{
        int nid, fail;

        if (!invoke_need_callbacks())
                return;

        for_each_online_node(nid) {
                fail = alloc_node_page_ext(nid);
                if (fail)
                        goto fail;
        }
        pr_info("allocated %ld bytes of page_ext\n", total_usage);
        return;

fail:
        pr_crit("allocation of page_ext failed.\n");
        panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

struct page_ext *lookup_page_ext(const struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        struct mem_section *section = __pfn_to_section(pfn);
        /*
         * The sanity checks the page allocator does upon freeing a
         * page can reach here before the page_ext arrays are
         * allocated when feeding a range of pages to the allocator
         * for the first time during bootup or memory hotplug.
         */
        if (!section->page_ext)
                return NULL;
        return get_entry(section->page_ext, pfn);
}

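/*
 * Try a physically contiguous, node-local allocation first and fall back
 * to vmalloc space if that fails; __GFP_NOWARN keeps the first attempt
 * quiet since the vzalloc_node() fallback is expected to cover it.
 */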
static void *__meminit alloc_page_ext(size_t size, int nid)
{
        gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
        void *addr = NULL;

        addr = alloc_pages_exact_nid(nid, size, flags);
        if (addr) {
                kmemleak_alloc(addr, size, 1, flags);
                return addr;
        }

        addr = vzalloc_node(size, nid);

        return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
        struct mem_section *section;
        struct page_ext *base;
        unsigned long table_size;

        section = __pfn_to_section(pfn);

        if (section->page_ext)
                return 0;

        table_size = page_ext_size * PAGES_PER_SECTION;
        base = alloc_page_ext(table_size, nid);

        /*
         * The value stored in section->page_ext is (base - page_ext_size
         * * pfn) and it does not point to the memory block allocated
         * above; without this annotation, kmemleak would report that
         * block as leaked (a false positive).
         */
        kmemleak_not_leak(base);

        if (!base) {
                pr_err("page ext allocation failure\n");
                return -ENOMEM;
        }

        /*
         * The passed "pfn" may not be section-aligned. For the offset
         * calculation we need the pfn of the section start, so apply the
         * mask.
         */
        pfn &= PAGE_SECTION_MASK;
        section->page_ext = (void *)base - page_ext_size * pfn;
        total_usage += table_size;
        return 0;
}
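
/*
 * Worked example of the offset trick above (hypothetical numbers): for a
 * section starting at pfn 0x10000, what gets stored is
 *
 *	section->page_ext = base - page_ext_size * 0x10000
 *
 * so for any pfn in the section, lookup_page_ext() can return
 * get_entry(section->page_ext, pfn) directly, without subtracting the
 * section start pfn first.
 */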

static void free_page_ext(void *addr)
{
        if (is_vmalloc_addr(addr)) {
                vfree(addr);
        } else {
                struct page *page = virt_to_page(addr);
                size_t table_size;

                table_size = page_ext_size * PAGES_PER_SECTION;

                BUG_ON(PageReserved(page));
                kmemleak_free(addr);
                free_pages_exact(addr, table_size);
        }
}

static void __free_page_ext(unsigned long pfn)
{
        struct mem_section *ms;
        struct page_ext *base;

        ms = __pfn_to_section(pfn);
        if (!ms || !ms->page_ext)
                return;
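        /*
         * Callers pass a section-aligned pfn, so get_entry() undoes the
         * offset trick from init_section_page_ext() and yields the
         * original allocation address that can be freed.
         */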
        base = get_entry(ms->page_ext, pfn);
        free_page_ext(base);
        ms->page_ext = NULL;
}

static int __meminit online_page_ext(unsigned long start_pfn,
                                unsigned long nr_pages,
                                int nid)
{
        unsigned long start, end, pfn;
        int fail = 0;

        start = SECTION_ALIGN_DOWN(start_pfn);
        end = SECTION_ALIGN_UP(start_pfn + nr_pages);

        if (nid == NUMA_NO_NODE) {
                /*
                 * In this case the node already exists and contains
                 * valid memory. "start_pfn" is the pfn passed to
                 * online_pages() and must therefore be a valid,
                 * existing pfn.
                 */
                nid = pfn_to_nid(start_pfn);
                VM_BUG_ON(!node_state(nid, N_ONLINE));
        }

        for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
                fail = init_section_page_ext(pfn, nid);
        if (!fail)
                return 0;

        /* rollback */
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_ext(pfn);

        return -ENOMEM;
}

static int __meminit offline_page_ext(unsigned long start_pfn,
                                unsigned long nr_pages, int nid)
{
        unsigned long start, end, pfn;

        start = SECTION_ALIGN_DOWN(start_pfn);
        end = SECTION_ALIGN_UP(start_pfn + nr_pages);

        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_ext(pfn);
        return 0;
}

static int __meminit page_ext_callback(struct notifier_block *self,
                               unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;
        int ret = 0;

        switch (action) {
        case MEM_GOING_ONLINE:
                ret = online_page_ext(mn->start_pfn,
                                   mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_OFFLINE:
        case MEM_CANCEL_ONLINE:
                offline_page_ext(mn->start_pfn,
                                mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_GOING_OFFLINE:
                break;
        case MEM_ONLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }

        return notifier_from_errno(ret);
}
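
/*
 * Note on the callback above: tables are allocated on MEM_GOING_ONLINE,
 * i.e. before any page of the new range can reach the page allocator,
 * and are torn down again on MEM_OFFLINE or when onlining is cancelled.
 */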

void __init page_ext_init(void)
{
        unsigned long pfn;
        int nid;

        if (!invoke_need_callbacks())
                return;

        for_each_node_state(nid, N_MEMORY) {
                unsigned long start_pfn, end_pfn;

                start_pfn = node_start_pfn(nid);
                end_pfn = node_end_pfn(nid);
                /*
                 * start_pfn and end_pfn may not be section-aligned, and
                 * page->flags of pages outside this node are not
                 * initialized, so walk the range one section at a time,
                 * from start_pfn up to the last section pfn below
                 * end_pfn.
                 */
                for (pfn = start_pfn; pfn < end_pfn;
                        pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

                        if (!pfn_valid(pfn))
                                continue;
                        /*
                         * Nodes' pfn ranges can interleave. Some
                         * architectures have a node layout such as:
                         * -------------pfn-------------->
                         * N0 | N1 | N2 | N0 | N1 | N2|....
                         */
                        if (pfn_to_nid(pfn) != nid)
                                continue;
                        if (init_section_page_ext(pfn, nid))
                                goto oom;
                        cond_resched();
                }
        }
        hotplug_memory_notifier(page_ext_callback, 0);
        pr_info("allocated %ld bytes of page_ext\n", total_usage);
        invoke_init_callbacks();
        return;

oom:
        panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif