linux/mm/page_owner.c
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include "internal.h"

static bool page_owner_disabled = true;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static void init_early_allocated_pages(void);

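/* Parse the "page_owner=on" kernel parameter; tracking stays off by default. */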
static int early_page_owner_param(char *buf)
{
        if (!buf)
                return -EINVAL;

        if (strcmp(buf, "on") == 0)
                page_owner_disabled = false;

        return 0;
}
early_param("page_owner", early_page_owner_param);

static bool need_page_owner(void)
{
        if (page_owner_disabled)
                return false;

        return true;
}

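/*
 * Enable the page_owner_inited static key and record owner info for pages
 * that were already allocated during early boot.
 */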
static void init_page_owner(void)
{
        if (page_owner_disabled)
                return;

        static_branch_enable(&page_owner_inited);
        init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
        .need = need_page_owner,
        .init = init_page_owner,
};

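/* Clear the owner info bit on every page of a freed order-sized block. */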
void __reset_page_owner(struct page *page, unsigned int order)
{
        int i;
        struct page_ext *page_ext;

        for (i = 0; i < (1 << order); i++) {
                page_ext = lookup_page_ext(page + i);
                if (unlikely(!page_ext))
                        continue;
                __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
        }
}

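/*
 * Record the allocation order, gfp mask and allocation stack trace in the
 * page's page_ext, then mark the owner info as valid.
 */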
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
        struct page_ext *page_ext = lookup_page_ext(page);

        struct stack_trace trace = {
                .nr_entries = 0,
                .max_entries = ARRAY_SIZE(page_ext->trace_entries),
                .entries = &page_ext->trace_entries[0],
                .skip = 3,
        };

        if (unlikely(!page_ext))
                return;

        save_stack_trace(&trace);

        page_ext->order = order;
        page_ext->gfp_mask = gfp_mask;
        page_ext->nr_entries = trace.nr_entries;
        page_ext->last_migrate_reason = -1;

        __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
                return;

        page_ext->last_migrate_reason = reason;
}

gfp_t __get_page_owner_gfp(struct page *page)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
                /*
                 * The caller just returns 0 if there is no valid gfp,
                 * so return 0 here too.
                 */
                return 0;

        return page_ext->gfp_mask;
}

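/*
 * Copy owner info from @oldpage to @newpage on migration so the new page
 * reports the original allocation's order, gfp mask and stack trace.
 */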
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
        struct page_ext *old_ext = lookup_page_ext(oldpage);
        struct page_ext *new_ext = lookup_page_ext(newpage);
        int i;

        if (unlikely(!old_ext || !new_ext))
                return;

        new_ext->order = old_ext->order;
        new_ext->gfp_mask = old_ext->gfp_mask;
        new_ext->nr_entries = old_ext->nr_entries;

        for (i = 0; i < ARRAY_SIZE(new_ext->trace_entries); i++)
                new_ext->trace_entries[i] = old_ext->trace_entries[i];

        /*
         * We don't clear the bit on the oldpage as it's going to be freed
         * after migration. Until then, the info can be useful in case of
         * a bug, and the overall stats will be off a bit only temporarily.
         * Also, migrate_misplaced_transhuge_page() can still fail the
         * migration and then we want the oldpage to retain the info. But
         * in that case we also don't need to explicitly clear the info from
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}

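/*
 * Format one page_owner record (allocation info, migratetype, flags and
 * stack trace) into a kernel buffer and copy it to userspace.
 */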
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_ext *page_ext)
{
        int ret;
        int pageblock_mt, page_mt;
        char *kbuf;
        struct stack_trace trace = {
                .nr_entries = page_ext->nr_entries,
                .entries = &page_ext->trace_entries[0],
        };

        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = snprintf(kbuf, count,
                        "Page allocated via order %u, mask %#x(%pGg)\n",
                        page_ext->order, page_ext->gfp_mask,
                        &page_ext->gfp_mask);

        if (ret >= count)
                goto err;

        /* Print information relevant to grouping pages by mobility */
        pageblock_mt = get_pageblock_migratetype(page);
        page_mt  = gfpflags_to_migratetype(page_ext->gfp_mask);
        ret += snprintf(kbuf + ret, count - ret,
                        "PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
                        pfn,
                        migratetype_names[page_mt],
                        pfn >> pageblock_order,
                        migratetype_names[pageblock_mt],
                        page->flags, &page->flags);

        if (ret >= count)
                goto err;

        ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
        if (ret >= count)
                goto err;

        if (page_ext->last_migrate_reason != -1) {
                ret += snprintf(kbuf + ret, count - ret,
                        "Page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_ext->last_migrate_reason]);
                if (ret >= count)
                        goto err;
        }

        ret += snprintf(kbuf + ret, count - ret, "\n");
        if (ret >= count)
                goto err;

        if (copy_to_user(buf, kbuf, ret))
                ret = -EFAULT;

        kfree(kbuf);
        return ret;

err:
        kfree(kbuf);
        return -ENOMEM;
}

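/* Dump the owner info for @page to the kernel log. */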
void __dump_page_owner(struct page *page)
{
        struct page_ext *page_ext = lookup_page_ext(page);
        struct stack_trace trace = { 0 };
        gfp_t gfp_mask;
        int mt;

        if (unlikely(!page_ext)) {
                pr_alert("There is no page extension available.\n");
                return;
        }

        trace.nr_entries = page_ext->nr_entries;
        trace.entries = &page_ext->trace_entries[0];
        gfp_mask = page_ext->gfp_mask;
        mt = gfpflags_to_migratetype(gfp_mask);

        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not active (free page?)\n");
                return;
        }

        pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
                 page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
        print_stack_trace(&trace, 0);

        if (page_ext->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_ext->last_migrate_reason]);
}

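/*
 * debugfs read handler: scan PFNs starting at the saved file offset and
 * emit the owner record of the next allocated page that has one.
 */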
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long pfn;
        struct page *page;
        struct page_ext *page_ext;

        if (!static_branch_unlikely(&page_owner_inited))
                return -EINVAL;

        page = NULL;
        pfn = min_low_pfn + *ppos;

        /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
        while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
                pfn++;

        drain_all_pages(NULL);

        /* Find an allocated page */
        for (; pfn < max_pfn; pfn++) {
                /*
                 * If the new page is in a new MAX_ORDER_NR_PAGES area,
                 * validate the area as existing, skip it if not
                 */
                if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                        pfn += MAX_ORDER_NR_PAGES - 1;
                        continue;
                }

                /* Check for holes within a MAX_ORDER area */
                if (!pfn_valid_within(pfn))
                        continue;

                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = page_order_unsafe(page);

                        if (freepage_order < MAX_ORDER)
                                pfn += (1UL << freepage_order) - 1;
                        continue;
                }

                page_ext = lookup_page_ext(page);
                if (unlikely(!page_ext))
                        continue;

                /*
                 * Some pages could be missed by concurrent allocation or free,
                 * because we don't hold the zone lock.
                 */
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        continue;

                /* Record the next PFN to read in the file offset */
                *ppos = (pfn - min_low_pfn) + 1;

                return print_page_owner(buf, count, pfn, page, page_ext);
        }

        return 0;
}

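/*
 * Walk one zone pageblock by pageblock and record owner info (order 0, no
 * gfp mask) for pages that were allocated before page_owner was enabled.
 */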
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
        unsigned long end_pfn = pfn + zone->spanned_pages;
        unsigned long count = 0;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct.
         */
        for (; pfn < end_pfn; ) {
                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                page = pfn_to_page(pfn);

                for (; pfn < block_end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                continue;

                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        /*
                         * We are safe to check the buddy flag and order
                         * because this is the init stage and only a single
                         * thread runs.
                         */
                        if (PageBuddy(page)) {
                                pfn += (1UL << page_order(page)) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = lookup_page_ext(page);
                        if (unlikely(!page_ext))
                                continue;

                        /* Maybe overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;

                        /* Found early allocated page */
                        set_page_owner(page, 0, 0);
                        count++;
                }
        }

        pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
                pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                init_pages_in_zone(pgdat, zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}

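/* Scan every online node and populated zone for early allocated pages. */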
static void init_early_allocated_pages(void)
{
        pg_data_t *pgdat;

        drain_all_pages(NULL);
        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
        .read           = read_page_owner,
};

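/* Create the debugfs "page_owner" file once page_owner has been enabled. */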
static int __init pageowner_init(void)
{
        struct dentry *dentry;

        if (!static_branch_unlikely(&page_owner_inited)) {
                pr_info("page_owner is disabled\n");
                return 0;
        }

        dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
                        NULL, &proc_page_owner_operations);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
late_initcall(pageowner_init)