linux/mm/page_owner.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>

#include "internal.h"
/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)
struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
};

static bool page_owner_disabled = true;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);
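/*
 * page_owner is disabled by default and is switched on with the
 * "page_owner=on" kernel command line parameter parsed below.
 */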
static int __init early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_disabled = false;

	return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
	if (page_owner_disabled)
		return false;

	return true;
}
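/*
 * Pre-saved placeholder stack traces: dummy_handle stands in when saving
 * a real trace would recurse into stackdepot, failure_handle is used when
 * the depot cannot record a new trace, and early_handle marks pages that
 * were already allocated before page_owner was initialized.
 */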
static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

static void init_page_owner(void)
{
	if (page_owner_disabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}
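/*
 * Registration with the page extension framework: .size reserves room for
 * struct page_owner in every page_ext, .need gates that on the boot
 * parameter, and .init runs once the page_ext storage is ready.
 */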
struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
};
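/*
 * The page_owner data lives inside the page_ext blob, at the offset the
 * page_ext core assigned to page_owner_ops.
 */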
static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return (void *)page_ext + page_owner_ops.offset;
}
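/*
 * Clear the owner bit on every subpage of a freed (1 << order) block so
 * the pages no longer report a stale allocation stack.
 */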
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;

	for (i = 0; i < (1 << order); i++) {
		page_ext = lookup_page_ext(page + i);
		if (unlikely(!page_ext))
			continue;
		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
	}
}
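/*
 * Return true if @ip already appears in the trace we just captured, which
 * means save_stack() was re-entered from stackdepot's own allocation.
 */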
static inline bool check_recursive_alloc(unsigned long *entries,
					 unsigned int nr_entries,
					 unsigned long ip)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (entries[i] == ip)
			return true;
	}
	return false;
}
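/*
 * Capture the current allocation stack and store it in stackdepot,
 * falling back to dummy_handle on recursion and to failure_handle when
 * the depot cannot record the trace.
 */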
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

	/*
	 * We need to check recursion here because our request to
	 * stackdepot could trigger memory allocation to save a new
	 * entry. That allocation would reach here and call
	 * stack_depot_save() again if we don't catch it. Since there is
	 * still not enough memory in stackdepot, it would try to
	 * allocate memory again and loop forever.
	 */
	if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
		return dummy_handle;

	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;

	return handle;
}
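/*
 * Record the allocation details (stack handle, order, gfp mask) in the
 * page_ext and mark the page as owned.
 */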
static inline void __set_page_owner_handle(struct page_ext *page_ext,
	depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)
{
	struct page_owner *page_owner;

	page_owner = get_page_owner(page_ext);
	page_owner->handle = handle;
	page_owner->order = order;
	page_owner->gfp_mask = gfp_mask;
	page_owner->last_migrate_reason = -1;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
noinline void __set_page_owner(struct page *page, unsigned int order,
					gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	depot_stack_handle_t handle;

	if (unlikely(!page_ext))
		return;

	handle = save_stack(gfp_mask);
	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
}
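/*
 * When a high-order page is split, make the head page order 0 and give
 * every tail page its own copy of the owner information.
 */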
void __split_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->order = 0;
	for (i = 1; i < (1 << order); i++)
		__copy_page_owner(page, page + i);
}
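/*
 * Transfer the owner information from a page being migrated (or split) to
 * its replacement page.
 */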
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext = lookup_page_ext(oldpage);
	struct page_ext *new_ext = lookup_page_ext(newpage);
	struct page_owner *old_page_owner, *new_page_owner;

	if (unlikely(!old_ext || !new_ext))
		return;

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	new_page_owner->order = old_page_owner->order;
	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
	new_page_owner->last_migrate_reason =
		old_page_owner->last_migrate_reason;
	new_page_owner->handle = old_page_owner->handle;

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the oldpage to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}
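/*
 * Count pageblocks that contain at least one page whose allocation
 * migratetype differs from the block's migratetype ("mixed" blocks),
 * indexed by the block's type, and print the per-zone totals.
 */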
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);
		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = page_order_unsafe(page);
				if (freepage_order < MAX_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfpflags_to_migratetype(
					page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}
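/*
 * Format one page_owner record into a kernel buffer and copy it to the
 * user buffer. Returns the number of bytes copied, -ENOMEM if the record
 * cannot be allocated or does not fit, or -EFAULT on copy failure.
 */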
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	unsigned long *entries;
	unsigned int nr_entries;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg)\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfpflags_to_migratetype(page_owner->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	nr_entries = stack_depot_fetch(handle, &entries);
	ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}
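/*
 * Dump the recorded allocation stack, gfp mask and migrate reason for
 * @page to the kernel log.
 */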
void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	unsigned long *entries;
	unsigned int nr_entries;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfpflags_to_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	handle = READ_ONCE(page_owner->handle);
	if (!handle) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	nr_entries = stack_depot_fetch(handle, &entries);
	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
	stack_trace_print(entries, nr_entries, 0);

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
}
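/*
 * read() handler for the debugfs file. The file offset encodes the next
 * PFN to scan; each call scans forward from there and returns the record
 * of the first allocated page that has owner information.
 */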
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Access to page_owner->handle isn't synchronized, so be
		 * careful when accessing it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page,
				page_owner, handle);
	}

	return 0;
}
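/*
 * Pages that were already allocated when page_owner was initialized have
 * no recorded stack. Walk the zone and tag every such allocated page with
 * early_handle so later dumps don't report them as unowned.
 */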
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page;
			struct page_ext *page_ext;

			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = page_order_unsafe(page);

				if (order > 0 && order < MAX_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			__set_page_owner_handle(page_ext, early_handle, 0, 0);
			count++;
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}
static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}
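/*
 * Expose the records through a root-only (mode 0400) "page_owner" file in
 * debugfs, typically read as /sys/kernel/debug/page_owner.
 */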
static const struct file_operations proc_page_owner_operations = {
	.read		= read_page_owner,
};

static int __init pageowner_init(void)
{
	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);

	return 0;
}
late_initcall(pageowner_init)