linux/mm/page_owner.c
// SPDX-License-Identifier: GPL-2.0
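/*
 * page_owner keeps track of who allocated each page: on every allocation
 * the call stack, order and gfp mask are saved in the page_ext area and
 * can be read back through the debugfs "page_owner" file or dumped along
 * with the page on a bug report. The feature is built with
 * CONFIG_PAGE_OWNER and activated with the "page_owner=on" boot parameter.
 */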
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

struct page_owner {
        unsigned int order;
        gfp_t gfp_mask;
        int last_migrate_reason;
        depot_stack_handle_t handle;
};

static bool page_owner_disabled = true;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

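/*
 * Parse the "page_owner=on" kernel command line parameter; page owner
 * tracking stays disabled unless it is passed.
 */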
static int early_page_owner_param(char *buf)
{
        if (!buf)
                return -EINVAL;

        if (strcmp(buf, "on") == 0)
                page_owner_disabled = false;

        return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
        if (page_owner_disabled)
                return false;

        return true;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
        unsigned long entries[4];
        struct stack_trace dummy;

        dummy.nr_entries = 0;
        dummy.max_entries = ARRAY_SIZE(entries);
        dummy.entries = &entries[0];
        dummy.skip = 0;

        save_stack_trace(&dummy);
        return depot_save_stack(&dummy, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
        dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
        failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
        early_handle = create_dummy_stack();
}

static void init_page_owner(void)
{
        if (page_owner_disabled)
                return;

        register_dummy_stack();
        register_failure_stack();
        register_early_stack();
        static_branch_enable(&page_owner_inited);
        init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
        .size = sizeof(struct page_owner),
        .need = need_page_owner,
        .init = init_page_owner,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
        return (void *)page_ext + page_owner_ops.offset;
}

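/*
 * Clear the PAGE_EXT_OWNER bit for every page in a freed order-sized block
 * so stale owner information is not reported for free pages.
 */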
void __reset_page_owner(struct page *page, unsigned int order)
{
        int i;
        struct page_ext *page_ext;

        for (i = 0; i < (1 << order); i++) {
                page_ext = lookup_page_ext(page + i);
                if (unlikely(!page_ext))
                        continue;
                __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
        }
}

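/*
 * Return true if @ip appears more than once in @trace, i.e. the current
 * allocation re-entered the same call site recursively.
 */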
static inline bool check_recursive_alloc(struct stack_trace *trace,
                                        unsigned long ip)
{
        int i, count;

        if (!trace->nr_entries)
                return false;

        for (i = 0, count = 0; i < trace->nr_entries; i++) {
                if (trace->entries[i] == ip && ++count == 2)
                        return true;
        }

        return false;
}

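/*
 * Capture the current allocation stack and store it in stackdepot.
 * Falls back to dummy_handle when the save would recurse back into the
 * allocator, and to failure_handle when stackdepot cannot allocate.
 */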
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = PAGE_OWNER_STACK_DEPTH,
                .skip = 2
        };
        depot_stack_handle_t handle;

        save_stack_trace(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        /*
         * We need to check for recursion here because our request to
         * stackdepot could trigger a memory allocation to save a new entry.
         * That new allocation would reach here and call depot_save_stack()
         * again if we don't catch it. Since stackdepot would still be short
         * of memory, it would keep allocating and loop forever.
         */
        if (check_recursive_alloc(&trace, _RET_IP_))
                return dummy_handle;

        handle = depot_save_stack(&trace, flags);
        if (!handle)
                handle = failure_handle;

        return handle;
}

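/*
 * Record the allocation details in the page_owner data attached to
 * @page_ext and mark the entry valid via PAGE_EXT_OWNER.
 */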
static inline void __set_page_owner_handle(struct page_ext *page_ext,
        depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)
{
        struct page_owner *page_owner;

        page_owner = get_page_owner(page_ext);
        page_owner->handle = handle;
        page_owner->order = order;
        page_owner->gfp_mask = gfp_mask;
        page_owner->last_migrate_reason = -1;

        __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

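/*
 * Record the caller's stack, the order and the gfp mask for a newly
 * allocated page.
 */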
noinline void __set_page_owner(struct page *page, unsigned int order,
                                        gfp_t gfp_mask)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        depot_stack_handle_t handle;

        if (unlikely(!page_ext))
                return;

        handle = save_stack(gfp_mask);
        __set_page_owner_handle(page_ext, handle, order, gfp_mask);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        page_owner = get_page_owner(page_ext);
        page_owner->last_migrate_reason = reason;
}

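/*
 * When a high-order page is split into order-0 pages, copy the owner
 * information to each tail page and reset the head page's order to 0.
 */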
void __split_page_owner(struct page *page, unsigned int order)
{
        int i;
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        page_owner = get_page_owner(page_ext);
        page_owner->order = 0;
        for (i = 1; i < (1 << order); i++)
                __copy_page_owner(page, page + i);
}

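/*
 * Duplicate the owner information from @oldpage to @newpage, so that after
 * migration the new page keeps the original allocation context.
 */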
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
        struct page_ext *old_ext = lookup_page_ext(oldpage);
        struct page_ext *new_ext = lookup_page_ext(newpage);
        struct page_owner *old_page_owner, *new_page_owner;

        if (unlikely(!old_ext || !new_ext))
                return;

        old_page_owner = get_page_owner(old_ext);
        new_page_owner = get_page_owner(new_ext);
        new_page_owner->order = old_page_owner->order;
        new_page_owner->gfp_mask = old_page_owner->gfp_mask;
        new_page_owner->last_migrate_reason =
                old_page_owner->last_migrate_reason;
        new_page_owner->handle = old_page_owner->handle;

        /*
         * We don't clear the bit on the oldpage as it's going to be freed
         * after migration. Until then, the info can be useful in case of
         * a bug, and the overall stats will be off a bit only temporarily.
         * Also, migrate_misplaced_transhuge_page() can still fail the
         * migration and then we want the oldpage to retain the info. But
         * in that case we also don't need to explicitly clear the info from
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}

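/*
 * For /proc/pagetypeinfo: count, per pageblock migratetype, the pageblocks
 * in @zone that contain at least one page whose allocation migratetype
 * differs from the block's migratetype ("mixed" blocks).
 */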
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                                       pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
        unsigned long end_pfn = pfn + zone->spanned_pages;
        unsigned long count[MIGRATE_TYPES] = { 0, };
        int pageblock_mt, page_mt;
        int i;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                page = pfn_to_page(pfn);
                pageblock_mt = get_pageblock_migratetype(page);

                for (; pfn < block_end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                continue;

                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        if (PageBuddy(page)) {
                                unsigned long freepage_order;

                                freepage_order = page_order_unsafe(page);
                                if (freepage_order < MAX_ORDER)
                                        pfn += (1UL << freepage_order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = lookup_page_ext(page);
                        if (unlikely(!page_ext))
                                continue;

                        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;

                        page_owner = get_page_owner(page_ext);
                        page_mt = gfpflags_to_migratetype(
                                        page_owner->gfp_mask);
                        if (pageblock_mt != page_mt) {
                                if (is_migrate_cma(pageblock_mt))
                                        count[MIGRATE_MOVABLE]++;
                                else
                                        count[pageblock_mt]++;

                                pfn = block_end_pfn;
                                break;
                        }
                        pfn += (1UL << page_owner->order) - 1;
                }
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (i = 0; i < MIGRATE_TYPES; i++)
                seq_printf(m, "%12lu ", count[i]);
        seq_putc(m, '\n');
}

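/*
 * Format one page_owner record into a kernel buffer and copy it to the
 * user buffer supplied by read_page_owner(). Returns the number of bytes
 * copied, or a negative errno (including -ENOMEM when the record does not
 * fit in @count bytes).
 */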
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_owner *page_owner,
                depot_stack_handle_t handle)
{
        int ret;
        int pageblock_mt, page_mt;
        char *kbuf;
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = PAGE_OWNER_STACK_DEPTH,
                .skip = 0
        };

        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = snprintf(kbuf, count,
                        "Page allocated via order %u, mask %#x(%pGg)\n",
                        page_owner->order, page_owner->gfp_mask,
                        &page_owner->gfp_mask);

        if (ret >= count)
                goto err;

        /* Print information relevant to grouping pages by mobility */
        pageblock_mt = get_pageblock_migratetype(page);
        page_mt  = gfpflags_to_migratetype(page_owner->gfp_mask);
        ret += snprintf(kbuf + ret, count - ret,
                        "PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
                        pfn,
                        migratetype_names[page_mt],
                        pfn >> pageblock_order,
                        migratetype_names[pageblock_mt],
                        page->flags, &page->flags);

        if (ret >= count)
                goto err;

        depot_fetch_stack(handle, &trace);
        ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
        if (ret >= count)
                goto err;

        if (page_owner->last_migrate_reason != -1) {
                ret += snprintf(kbuf + ret, count - ret,
                        "Page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
                if (ret >= count)
                        goto err;
        }

        ret += snprintf(kbuf + ret, count - ret, "\n");
        if (ret >= count)
                goto err;

        if (copy_to_user(buf, kbuf, ret))
                ret = -EFAULT;

        kfree(kbuf);
        return ret;

err:
        kfree(kbuf);
        return -ENOMEM;
}

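/*
 * Dump the owner information for @page to the kernel log, e.g. via
 * dump_page() when reporting a page-related bug.
 */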
void __dump_page_owner(struct page *page)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = PAGE_OWNER_STACK_DEPTH,
                .skip = 0
        };
        depot_stack_handle_t handle;
        gfp_t gfp_mask;
        int mt;

        if (unlikely(!page_ext)) {
                pr_alert("There is no page extension available.\n");
                return;
        }

        page_owner = get_page_owner(page_ext);
        gfp_mask = page_owner->gfp_mask;
        mt = gfpflags_to_migratetype(gfp_mask);

        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not active (free page?)\n");
                return;
        }

        handle = READ_ONCE(page_owner->handle);
        if (!handle) {
                pr_alert("page_owner info is not active (free page?)\n");
                return;
        }

        depot_fetch_stack(handle, &trace);
        pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
                 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
        print_stack_trace(&trace, 0);

        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
}

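/*
 * Read handler for the debugfs "page_owner" file. *ppos is used as a PFN
 * offset from min_low_pfn; each read returns one record for the next
 * allocated page that has owner information.
 */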
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long pfn;
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        depot_stack_handle_t handle;

        if (!static_branch_unlikely(&page_owner_inited))
                return -EINVAL;

        page = NULL;
        pfn = min_low_pfn + *ppos;

        /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
        while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
                pfn++;

        drain_all_pages(NULL);

        /* Find an allocated page */
        for (; pfn < max_pfn; pfn++) {
                /*
                 * If the new page is in a new MAX_ORDER_NR_PAGES area,
                 * validate the area as existing, skip it if not
                 */
                if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                        pfn += MAX_ORDER_NR_PAGES - 1;
                        continue;
                }

                /* Check for holes within a MAX_ORDER area */
                if (!pfn_valid_within(pfn))
                        continue;

                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = page_order_unsafe(page);

                        if (freepage_order < MAX_ORDER)
                                pfn += (1UL << freepage_order) - 1;
                        continue;
                }

                page_ext = lookup_page_ext(page);
                if (unlikely(!page_ext))
                        continue;

                /*
                 * Some pages could be missed by concurrent allocation or free,
                 * because we don't hold the zone lock.
                 */
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        continue;

                page_owner = get_page_owner(page_ext);

                /*
                 * Access to page_owner->handle isn't synchronized, so be
                 * careful when reading it.
                 */
                handle = READ_ONCE(page_owner->handle);
                if (!handle)
                        continue;

                /* Record the next PFN to read in the file offset */
                *ppos = (pfn - min_low_pfn) + 1;

                return print_page_owner(buf, count, pfn, page,
                                page_owner, handle);
        }

        return 0;
}

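/*
 * Walk @zone and mark every page that was allocated before page_owner was
 * initialized with the shared early_handle, so such pages are reported
 * rather than silently skipped.
 */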
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
        unsigned long end_pfn = pfn + zone->spanned_pages;
        unsigned long count = 0;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                page = pfn_to_page(pfn);

                for (; pfn < block_end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                continue;

                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        /*
                         * To avoid having to grab zone->lock, be a little
                         * careful when reading buddy page order. The only
                         * danger is that we skip too much and potentially miss
                         * some early allocated pages, which is better than
                         * heavy lock contention.
                         */
                        if (PageBuddy(page)) {
                                unsigned long order = page_order_unsafe(page);

                                if (order > 0 && order < MAX_ORDER)
                                        pfn += (1UL << order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = lookup_page_ext(page);
                        if (unlikely(!page_ext))
                                continue;

                        /* Maybe overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;

                        /* Found early allocated page */
                        __set_page_owner_handle(page_ext, early_handle, 0, 0);
                        count++;
                }
                cond_resched();
        }

        pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
                pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                init_pages_in_zone(pgdat, zone);
        }
}

static void init_early_allocated_pages(void)
{
        pg_data_t *pgdat;

        drain_all_pages(NULL);
        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
        .read           = read_page_owner,
};

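/*
 * Create the debugfs "page_owner" file at late init, provided the feature
 * was enabled on the command line.
 */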
static int __init pageowner_init(void)
{
        struct dentry *dentry;

        if (!static_branch_unlikely(&page_owner_inited)) {
                pr_info("page_owner is disabled\n");
                return 0;
        }

        dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
                        NULL, &proc_page_owner_operations);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
late_initcall(pageowner_init)