linux/mm/page_owner.c
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

struct page_owner {
        unsigned int order;
        gfp_t gfp_mask;
        int last_migrate_reason;
        depot_stack_handle_t handle;
};

static bool page_owner_disabled = true;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;

static void init_early_allocated_pages(void);

static int early_page_owner_param(char *buf)
{
        if (!buf)
                return -EINVAL;

        if (strcmp(buf, "on") == 0)
                page_owner_disabled = false;

        return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
        if (page_owner_disabled)
                return false;

        return true;
}

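/*
 * Pre-registered fallback handles: dummy_handle is returned when save_stack()
 * detects that it is being called recursively from within the stack-saving
 * path, and failure_handle is returned when depot_save_stack() cannot record
 * a trace (e.g. because it fails to allocate memory).
 */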
static noinline void register_dummy_stack(void)
{
        unsigned long entries[4];
        struct stack_trace dummy;

        dummy.nr_entries = 0;
        dummy.max_entries = ARRAY_SIZE(entries);
        dummy.entries = &entries[0];
        dummy.skip = 0;

        save_stack_trace(&dummy);
        dummy_handle = depot_save_stack(&dummy, GFP_KERNEL);
}

static noinline void register_failure_stack(void)
{
        unsigned long entries[4];
        struct stack_trace failure;

        failure.nr_entries = 0;
        failure.max_entries = ARRAY_SIZE(entries);
        failure.entries = &entries[0];
        failure.skip = 0;

        save_stack_trace(&failure);
        failure_handle = depot_save_stack(&failure, GFP_KERNEL);
}

static void init_page_owner(void)
{
        if (page_owner_disabled)
                return;

        register_dummy_stack();
        register_failure_stack();
        static_branch_enable(&page_owner_inited);
        init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
        .size = sizeof(struct page_owner),
        .need = need_page_owner,
        .init = init_page_owner,
};

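/*
 * page_owner data lives in the per-page page_ext area, at the offset that
 * was reserved for page_owner_ops when page_ext was initialized.
 */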
static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
        return (void *)page_ext + page_owner_ops.offset;
}

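/*
 * Called when a page is freed: clear the owner bit on every page of the
 * order-sized block so stale ownership information is not reported.
 */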
void __reset_page_owner(struct page *page, unsigned int order)
{
        int i;
        struct page_ext *page_ext;

        for (i = 0; i < (1 << order); i++) {
                page_ext = lookup_page_ext(page + i);
                if (unlikely(!page_ext))
                        continue;
                __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
        }
}

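/*
 * Return true if @ip appears at least twice in the captured trace, which
 * means this allocation was reached recursively from the page owner
 * stack-saving path (e.g. stackdepot allocating memory for a new entry),
 * so we must not call into stackdepot again.
 */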
static inline bool check_recursive_alloc(struct stack_trace *trace,
                                        unsigned long ip)
{
        int i, count;

        if (!trace->nr_entries)
                return false;

        for (i = 0, count = 0; i < trace->nr_entries; i++) {
                if (trace->entries[i] == ip && ++count == 2)
                        return true;
        }

        return false;
}

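/*
 * Capture the current call stack and store it in stackdepot, returning a
 * compact handle. Falls back to dummy_handle on recursion and to
 * failure_handle if stackdepot cannot record the trace.
 */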
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = PAGE_OWNER_STACK_DEPTH,
                .skip = 0
        };
        depot_stack_handle_t handle;

        save_stack_trace(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        /*
         * We need to check for recursion here because our request to
         * stackdepot could trigger a memory allocation to save the new
         * entry. That allocation would reach this function and call
         * depot_save_stack() again if we didn't catch it; since stackdepot
         * still lacks memory, it would keep trying to allocate and loop
         * forever.
         */
        if (check_recursive_alloc(&trace, _RET_IP_))
                return dummy_handle;

        handle = depot_save_stack(&trace, flags);
        if (!handle)
                handle = failure_handle;

        return handle;
}

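/*
 * Record the allocation context (order, gfp mask, allocation stack) for a
 * newly allocated page and mark its page_ext as owned.
 */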
noinline void __set_page_owner(struct page *page, unsigned int order,
                                        gfp_t gfp_mask)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        page_owner = get_page_owner(page_ext);
        page_owner->handle = save_stack(gfp_mask);
        page_owner->order = order;
        page_owner->gfp_mask = gfp_mask;
        page_owner->last_migrate_reason = -1;

        __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        page_owner = get_page_owner(page_ext);
        page_owner->last_migrate_reason = reason;
}

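/*
 * When a high-order page is split into order-0 pages, reset the head page's
 * order to 0 and give each tail page its own copy of the owner information.
 */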
void __split_page_owner(struct page *page, unsigned int order)
{
        int i;
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        page_owner = get_page_owner(page_ext);
        page_owner->order = 0;
        for (i = 1; i < (1 << order); i++)
                __copy_page_owner(page, page + i);
}

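/*
 * Copy the owner information from @oldpage to @newpage during migration so
 * that the new page reports the original allocation context.
 */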
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
        struct page_ext *old_ext = lookup_page_ext(oldpage);
        struct page_ext *new_ext = lookup_page_ext(newpage);
        struct page_owner *old_page_owner, *new_page_owner;

        if (unlikely(!old_ext || !new_ext))
                return;

        old_page_owner = get_page_owner(old_ext);
        new_page_owner = get_page_owner(new_ext);
        new_page_owner->order = old_page_owner->order;
        new_page_owner->gfp_mask = old_page_owner->gfp_mask;
        new_page_owner->last_migrate_reason =
                old_page_owner->last_migrate_reason;
        new_page_owner->handle = old_page_owner->handle;

        /*
         * We don't clear the bit on the oldpage as it's going to be freed
         * after migration. Until then, the info can be useful in case of
         * a bug, and the overall stats will only be off a bit temporarily.
         * Also, migrate_misplaced_transhuge_page() can still fail the
         * migration and then we want the oldpage to retain the info. But
         * in that case we also don't need to explicitly clear the info from
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}

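/*
 * Walk a zone pageblock by pageblock and count, per pageblock migratetype,
 * the blocks that contain at least one page allocated with a different
 * migratetype ("mixed" blocks). The result is printed as one row for
 * /proc/pagetypeinfo.
 */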
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                                       pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
        unsigned long end_pfn = pfn + zone->spanned_pages;
        unsigned long count[MIGRATE_TYPES] = { 0, };
        int pageblock_mt, page_mt;
        int i;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                page = pfn_to_page(pfn);
                pageblock_mt = get_pageblock_migratetype(page);

                for (; pfn < block_end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                continue;

                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        if (PageBuddy(page)) {
                                unsigned long freepage_order;

                                freepage_order = page_order_unsafe(page);
                                if (freepage_order < MAX_ORDER)
                                        pfn += (1UL << freepage_order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = lookup_page_ext(page);
                        if (unlikely(!page_ext))
                                continue;

                        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;

                        page_owner = get_page_owner(page_ext);
                        page_mt = gfpflags_to_migratetype(
                                        page_owner->gfp_mask);
                        if (pageblock_mt != page_mt) {
                                if (is_migrate_cma(pageblock_mt))
                                        count[MIGRATE_MOVABLE]++;
                                else
                                        count[pageblock_mt]++;

                                pfn = block_end_pfn;
                                break;
                        }
                        pfn += (1UL << page_owner->order) - 1;
                }
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (i = 0; i < MIGRATE_TYPES; i++)
                seq_printf(m, "%12lu ", count[i]);
        seq_putc(m, '\n');
}

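/*
 * Format the owner information for one page into a kernel buffer and copy
 * it to userspace. Returns the number of bytes written, or a negative errno
 * if the buffer is too small or the copy to userspace fails.
 */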
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_owner *page_owner,
                depot_stack_handle_t handle)
{
        int ret;
        int pageblock_mt, page_mt;
        char *kbuf;
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = PAGE_OWNER_STACK_DEPTH,
                .skip = 0
        };

        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = snprintf(kbuf, count,
                        "Page allocated via order %u, mask %#x(%pGg)\n",
                        page_owner->order, page_owner->gfp_mask,
                        &page_owner->gfp_mask);

        if (ret >= count)
                goto err;

        /* Print information relevant to grouping pages by mobility */
        pageblock_mt = get_pageblock_migratetype(page);
        page_mt = gfpflags_to_migratetype(page_owner->gfp_mask);
        ret += snprintf(kbuf + ret, count - ret,
                        "PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
                        pfn,
                        migratetype_names[page_mt],
                        pfn >> pageblock_order,
                        migratetype_names[pageblock_mt],
                        page->flags, &page->flags);

        if (ret >= count)
                goto err;

        depot_fetch_stack(handle, &trace);
        ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
        if (ret >= count)
                goto err;

        if (page_owner->last_migrate_reason != -1) {
                ret += snprintf(kbuf + ret, count - ret,
                        "Page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
                if (ret >= count)
                        goto err;
        }

        ret += snprintf(kbuf + ret, count - ret, "\n");
        if (ret >= count)
                goto err;

        if (copy_to_user(buf, kbuf, ret))
                ret = -EFAULT;

        kfree(kbuf);
        return ret;

err:
        kfree(kbuf);
        return -ENOMEM;
}

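/*
 * Dump the owner information for @page to the kernel log. Used from the
 * page dump path when something goes wrong with a page.
 */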
void __dump_page_owner(struct page *page)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = PAGE_OWNER_STACK_DEPTH,
                .skip = 0
        };
        depot_stack_handle_t handle;
        gfp_t gfp_mask;
        int mt;

        if (unlikely(!page_ext)) {
                pr_alert("There is no page extension available.\n");
                return;
        }

        page_owner = get_page_owner(page_ext);
        gfp_mask = page_owner->gfp_mask;
        mt = gfpflags_to_migratetype(gfp_mask);

        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not active (free page?)\n");
                return;
        }

        handle = READ_ONCE(page_owner->handle);
        if (!handle) {
                pr_alert("page_owner info is not active (free page?)\n");
                return;
        }

        depot_fetch_stack(handle, &trace);
        pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
                 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
        print_stack_trace(&trace, 0);

        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
}

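/*
 * debugfs read handler: scan forward from the PFN encoded in *ppos, find the
 * next allocated page with owner information, print it and advance *ppos so
 * that the next read continues from the following PFN.
 */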
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long pfn;
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        depot_stack_handle_t handle;

        if (!static_branch_unlikely(&page_owner_inited))
                return -EINVAL;

        page = NULL;
        pfn = min_low_pfn + *ppos;

        /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
        while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
                pfn++;

        drain_all_pages(NULL);

        /* Find an allocated page */
        for (; pfn < max_pfn; pfn++) {
                /*
                 * If the new page is in a new MAX_ORDER_NR_PAGES area,
                 * validate the area as existing, skip it if not
                 */
                if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                        pfn += MAX_ORDER_NR_PAGES - 1;
                        continue;
                }

                /* Check for holes within a MAX_ORDER area */
                if (!pfn_valid_within(pfn))
                        continue;

                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = page_order_unsafe(page);

                        if (freepage_order < MAX_ORDER)
                                pfn += (1UL << freepage_order) - 1;
                        continue;
                }

                page_ext = lookup_page_ext(page);
                if (unlikely(!page_ext))
                        continue;

                /*
                 * Some pages could be missed by concurrent allocation or free,
                 * because we don't hold the zone lock.
                 */
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        continue;

                page_owner = get_page_owner(page_ext);

                /*
                 * Access to page_owner->handle isn't synchronized, so be
                 * careful when reading it.
                 */
                handle = READ_ONCE(page_owner->handle);
                if (!handle)
                        continue;

                /* Record the next PFN to read in the file offset */
                *ppos = (pfn - min_low_pfn) + 1;

                return print_page_owner(buf, count, pfn, page,
                                page_owner, handle);
        }

        return 0;
}

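/*
 * Walk a zone at init time and tag every already-allocated page as owned
 * (order 0, zero gfp mask), so that pages allocated before page_owner was
 * enabled are still accounted for.
 */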
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
        unsigned long end_pfn = pfn + zone->spanned_pages;
        unsigned long count = 0;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                page = pfn_to_page(pfn);

                for (; pfn < block_end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                continue;

                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        /*
                         * We are safe to check the buddy flag and order here
                         * because this is the init stage and only a single
                         * thread runs.
                         */
                        if (PageBuddy(page)) {
                                pfn += (1UL << page_order(page)) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = lookup_page_ext(page);
                        if (unlikely(!page_ext))
                                continue;

                        /* Maybe overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;

                        /* Found early allocated page */
                        set_page_owner(page, 0, 0);
                        count++;
                }
        }

        pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
                pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                init_pages_in_zone(pgdat, zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}

static void init_early_allocated_pages(void)
{
        pg_data_t *pgdat;

        drain_all_pages(NULL);
        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
        .read           = read_page_owner,
};

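/*
 * Create the debugfs file (<debugfs>/page_owner) that exposes the recorded
 * allocation information, unless page_owner was left disabled at boot.
 */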
static int __init pageowner_init(void)
{
        struct dentry *dentry;

        if (!static_branch_unlikely(&page_owner_inited)) {
                pr_info("page_owner is disabled\n");
                return 0;
        }

        dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
                        NULL, &proc_page_owner_operations);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
late_initcall(pageowner_init)