/* linux/fs/proc/task_mmu.c */
   1#include <linux/mm.h>
   2#include <linux/hugetlb.h>
   3#include <linux/huge_mm.h>
   4#include <linux/mount.h>
   5#include <linux/seq_file.h>
   6#include <linux/highmem.h>
   7#include <linux/ptrace.h>
   8#include <linux/slab.h>
   9#include <linux/pagemap.h>
  10#include <linux/mempolicy.h>
  11#include <linux/rmap.h>
  12#include <linux/swap.h>
  13#include <linux/swapops.h>
  14#include <linux/mmu_notifier.h>
  15
  16#include <asm/elf.h>
  17#include <asm/uaccess.h>
  18#include <asm/tlbflush.h>
  19#include "internal.h"
  20
  21void task_mem(struct seq_file *m, struct mm_struct *mm)
  22{
  23        unsigned long data, text, lib, swap;
  24        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  25
  26        /*
  27         * Note: to minimize their overhead, mm maintains hiwater_vm and
  28         * hiwater_rss only when about to *lower* total_vm or rss.  Any
  29         * collector of these hiwater stats must therefore get total_vm
  30         * and rss too, which will usually be the higher.  Barriers? not
  31         * worth the effort, such snapshots can always be inconsistent.
  32         */
  33        hiwater_vm = total_vm = mm->total_vm;
  34        if (hiwater_vm < mm->hiwater_vm)
  35                hiwater_vm = mm->hiwater_vm;
  36        hiwater_rss = total_rss = get_mm_rss(mm);
  37        if (hiwater_rss < mm->hiwater_rss)
  38                hiwater_rss = mm->hiwater_rss;
  39
  40        data = mm->total_vm - mm->shared_vm - mm->stack_vm;
  41        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
  42        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
  43        swap = get_mm_counter(mm, MM_SWAPENTS);
  44        seq_printf(m,
  45                "VmPeak:\t%8lu kB\n"
  46                "VmSize:\t%8lu kB\n"
  47                "VmLck:\t%8lu kB\n"
  48                "VmPin:\t%8lu kB\n"
  49                "VmHWM:\t%8lu kB\n"
  50                "VmRSS:\t%8lu kB\n"
  51                "VmData:\t%8lu kB\n"
  52                "VmStk:\t%8lu kB\n"
  53                "VmExe:\t%8lu kB\n"
  54                "VmLib:\t%8lu kB\n"
  55                "VmPTE:\t%8lu kB\n"
  56                "VmSwap:\t%8lu kB\n",
  57                hiwater_vm << (PAGE_SHIFT-10),
  58                total_vm << (PAGE_SHIFT-10),
  59                mm->locked_vm << (PAGE_SHIFT-10),
  60                mm->pinned_vm << (PAGE_SHIFT-10),
  61                hiwater_rss << (PAGE_SHIFT-10),
  62                total_rss << (PAGE_SHIFT-10),
  63                data << (PAGE_SHIFT-10),
  64                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
  65                (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
  66                swap << (PAGE_SHIFT-10));
  67}
  68
  69unsigned long task_vsize(struct mm_struct *mm)
  70{
  71        return PAGE_SIZE * mm->total_vm;
  72}
  73
  74unsigned long task_statm(struct mm_struct *mm,
  75                         unsigned long *shared, unsigned long *text,
  76                         unsigned long *data, unsigned long *resident)
  77{
  78        *shared = get_mm_counter(mm, MM_FILEPAGES);
  79        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  80                                                                >> PAGE_SHIFT;
  81        *data = mm->total_vm - mm->shared_vm;
  82        *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
  83        return mm->total_vm;
  84}
  85
  86static void pad_len_spaces(struct seq_file *m, int len)
  87{
  88        len = 25 + sizeof(void*) * 6 - len;
  89        if (len < 1)
  90                len = 1;
  91        seq_printf(m, "%*c", len, ' ');
  92}
  93
  94#ifdef CONFIG_NUMA
  95/*
  96 * These functions are for numa_maps, but they are called from the generic
  97 * **maps seq_file ->start() and ->stop() ops.
  98 *
  99 * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
 100 * Each mempolicy object is controlled by reference counting. The problem here
 101 * is how to avoid accessing a dead mempolicy object.
 102 *
 103 * Because we're holding mmap_sem while reading the seq_file, it's safe to
 104 * access each vma's mempolicy: no vma will drop its reference to a mempolicy
 105 * while we hold the semaphore.
 106 *
 107 * A task's mempolicy (task->mempolicy) behaves differently.  task->mempolicy
 108 * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
 109 * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
 110 * guarantee that the task never exits under us.  But taking task_lock() around
 111 * get_vma_policy() causes a lock-ordering problem.
 112 *
 113 * To access task->mempolicy without a lock, we take a reference on the object
 114 * it points to and remember it; numa_maps then sees a live object or NULL.
 115 */
 116static void hold_task_mempolicy(struct proc_maps_private *priv)
 117{
 118        struct task_struct *task = priv->task;
 119
 120        task_lock(task);
 121        priv->task_mempolicy = task->mempolicy;
 122        mpol_get(priv->task_mempolicy);
 123        task_unlock(task);
 124}
 125static void release_task_mempolicy(struct proc_maps_private *priv)
 126{
 127        mpol_put(priv->task_mempolicy);
 128}
 129#else
 130static void hold_task_mempolicy(struct proc_maps_private *priv)
 131{
 132}
 133static void release_task_mempolicy(struct proc_maps_private *priv)
 134{
 135}
 136#endif
 137
 138static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
 139{
 140        if (vma && vma != priv->tail_vma) {
 141                struct mm_struct *mm = vma->vm_mm;
 142                release_task_mempolicy(priv);
 143                up_read(&mm->mmap_sem);
 144                mmput(mm);
 145        }
 146}
 147
 148static void *m_start(struct seq_file *m, loff_t *pos)
 149{
 150        struct proc_maps_private *priv = m->private;
 151        unsigned long last_addr = m->version;
 152        struct mm_struct *mm;
 153        struct vm_area_struct *vma, *tail_vma = NULL;
 154        loff_t l = *pos;
 155
 156        /* Clear the per syscall fields in priv */
 157        priv->task = NULL;
 158        priv->tail_vma = NULL;
 159
 160        /*
 161         * We remember last_addr rather than next_addr so that we hit the
 162         * mmap_cache most of the time. last_addr is zero at the beginning
 163         * and also after an lseek. It is -1 after we have walked past the
 164         * end of the vmas.
 165         */
 166
 167        if (last_addr == -1UL)
 168                return NULL;
 169
 170        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
 171        if (!priv->task)
 172                return ERR_PTR(-ESRCH);
 173
 174        mm = mm_access(priv->task, PTRACE_MODE_READ);
 175        if (!mm || IS_ERR(mm))
 176                return mm;
 177        down_read(&mm->mmap_sem);
 178
 179        tail_vma = get_gate_vma(priv->task->mm);
 180        priv->tail_vma = tail_vma;
 181        hold_task_mempolicy(priv);
 182        /* Start with last addr hint */
 183        vma = find_vma(mm, last_addr);
 184        if (last_addr && vma) {
 185                vma = vma->vm_next;
 186                goto out;
 187        }
 188
 189        /*
 190         * Check that the vma index is within range and do a
 191         * sequential scan up to the requested index.
 192         */
 193        vma = NULL;
 194        if ((unsigned long)l < mm->map_count) {
 195                vma = mm->mmap;
 196                while (l-- && vma)
 197                        vma = vma->vm_next;
 198                goto out;
 199        }
 200
 201        if (l != mm->map_count)
 202                tail_vma = NULL; /* After gate vma */
 203
 204out:
 205        if (vma)
 206                return vma;
 207
 208        release_task_mempolicy(priv);
 209        /* End of vmas has been reached */
 210        m->version = (tail_vma != NULL)? 0: -1UL;
 211        up_read(&mm->mmap_sem);
 212        mmput(mm);
 213        return tail_vma;
 214}
 215
 216static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 217{
 218        struct proc_maps_private *priv = m->private;
 219        struct vm_area_struct *vma = v;
 220        struct vm_area_struct *tail_vma = priv->tail_vma;
 221
 222        (*pos)++;
 223        if (vma && (vma != tail_vma) && vma->vm_next)
 224                return vma->vm_next;
 225        vma_stop(priv, vma);
 226        return (vma != tail_vma)? tail_vma: NULL;
 227}
 228
 229static void m_stop(struct seq_file *m, void *v)
 230{
 231        struct proc_maps_private *priv = m->private;
 232        struct vm_area_struct *vma = v;
 233
 234        if (!IS_ERR(vma))
 235                vma_stop(priv, vma);
 236        if (priv->task)
 237                put_task_struct(priv->task);
 238}
 239
 240static int do_maps_open(struct inode *inode, struct file *file,
 241                        const struct seq_operations *ops)
 242{
 243        struct proc_maps_private *priv;
 244        int ret = -ENOMEM;
 245        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 246        if (priv) {
 247                priv->pid = proc_pid(inode);
 248                ret = seq_open(file, ops);
 249                if (!ret) {
 250                        struct seq_file *m = file->private_data;
 251                        m->private = priv;
 252                } else {
 253                        kfree(priv);
 254                }
 255        }
 256        return ret;
 257}
 258
 259static void
 260show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 261{
 262        struct mm_struct *mm = vma->vm_mm;
 263        struct file *file = vma->vm_file;
 264        struct proc_maps_private *priv = m->private;
 265        struct task_struct *task = priv->task;
 266        vm_flags_t flags = vma->vm_flags;
 267        unsigned long ino = 0;
 268        unsigned long long pgoff = 0;
 269        unsigned long start, end;
 270        dev_t dev = 0;
 271        int len;
 272        const char *name = NULL;
 273
 274        if (file) {
 275                struct inode *inode = file_inode(vma->vm_file);
 276                dev = inode->i_sb->s_dev;
 277                ino = inode->i_ino;
 278                pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 279        }
 280
 281        /* We don't show the stack guard page in /proc/maps */
 282        start = vma->vm_start;
 283        if (stack_guard_page_start(vma, start))
 284                start += PAGE_SIZE;
 285        end = vma->vm_end;
 286        if (stack_guard_page_end(vma, end))
 287                end -= PAGE_SIZE;
 288
 289        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
 290                        start,
 291                        end,
 292                        flags & VM_READ ? 'r' : '-',
 293                        flags & VM_WRITE ? 'w' : '-',
 294                        flags & VM_EXEC ? 'x' : '-',
 295                        flags & VM_MAYSHARE ? 's' : 'p',
 296                        pgoff,
 297                        MAJOR(dev), MINOR(dev), ino, &len);
 298
 299        /*
 300         * Print the dentry name for named mappings, and a
 301         * special [heap] marker for the heap:
 302         */
 303        if (file) {
 304                pad_len_spaces(m, len);
 305                seq_path(m, &file->f_path, "\n");
 306                goto done;
 307        }
 308
 309        name = arch_vma_name(vma);
 310        if (!name) {
 311                pid_t tid;
 312
 313                if (!mm) {
 314                        name = "[vdso]";
 315                        goto done;
 316                }
 317
 318                if (vma->vm_start <= mm->brk &&
 319                    vma->vm_end >= mm->start_brk) {
 320                        name = "[heap]";
 321                        goto done;
 322                }
 323
 324                tid = vm_is_stack(task, vma, is_pid);
 325
 326                if (tid != 0) {
 327                        /*
 328                         * Thread stack in /proc/PID/task/TID/maps or
 329                         * the main process stack.
 330                         */
 331                        if (!is_pid || (vma->vm_start <= mm->start_stack &&
 332                            vma->vm_end >= mm->start_stack)) {
 333                                name = "[stack]";
 334                        } else {
 335                                /* Thread stack in /proc/PID/maps */
 336                                pad_len_spaces(m, len);
 337                                seq_printf(m, "[stack:%d]", tid);
 338                        }
 339                }
 340        }
 341
 342done:
 343        if (name) {
 344                pad_len_spaces(m, len);
 345                seq_puts(m, name);
 346        }
 347        seq_putc(m, '\n');
 348}
 349
 350static int show_map(struct seq_file *m, void *v, int is_pid)
 351{
 352        struct vm_area_struct *vma = v;
 353        struct proc_maps_private *priv = m->private;
 354        struct task_struct *task = priv->task;
 355
 356        show_map_vma(m, vma, is_pid);
 357
 358        if (m->count < m->size)  /* vma is copied successfully */
 359                m->version = (vma != get_gate_vma(task->mm))
 360                        ? vma->vm_start : 0;
 361        return 0;
 362}
 363
 364static int show_pid_map(struct seq_file *m, void *v)
 365{
 366        return show_map(m, v, 1);
 367}
 368
 369static int show_tid_map(struct seq_file *m, void *v)
 370{
 371        return show_map(m, v, 0);
 372}
 373
 374static const struct seq_operations proc_pid_maps_op = {
 375        .start  = m_start,
 376        .next   = m_next,
 377        .stop   = m_stop,
 378        .show   = show_pid_map
 379};
 380
 381static const struct seq_operations proc_tid_maps_op = {
 382        .start  = m_start,
 383        .next   = m_next,
 384        .stop   = m_stop,
 385        .show   = show_tid_map
 386};
 387
 388static int pid_maps_open(struct inode *inode, struct file *file)
 389{
 390        return do_maps_open(inode, file, &proc_pid_maps_op);
 391}
 392
 393static int tid_maps_open(struct inode *inode, struct file *file)
 394{
 395        return do_maps_open(inode, file, &proc_tid_maps_op);
 396}
 397
 398const struct file_operations proc_pid_maps_operations = {
 399        .open           = pid_maps_open,
 400        .read           = seq_read,
 401        .llseek         = seq_lseek,
 402        .release        = seq_release_private,
 403};
 404
 405const struct file_operations proc_tid_maps_operations = {
 406        .open           = tid_maps_open,
 407        .read           = seq_read,
 408        .llseek         = seq_lseek,
 409        .release        = seq_release_private,
 410};
 411
 412/*
 413 * Proportional Set Size (PSS): my share of RSS.
 414 *
 415 * PSS of a process is the count of pages it has in memory, where each
 416 * page is divided by the number of processes sharing it.  So if a
 417 * process has 1000 pages all to itself, and 1000 shared with one other
 418 * process, its PSS will be 1500.
 419 *
 420 * To keep accumulated division errors low, we use a 64-bit
 421 * fixed-point pss counter; (pss >> PSS_SHIFT) is then the real
 422 * byte count.
 423 *
 424 * A shift of 12 before division means (assuming 4K page size):
 425 *      - 1M 3-user-pages add up to 8KB errors;
 426 *      - supports mapcount up to 2^24, or 16M;
 427 *      - supports PSS up to 2^52 bytes, or 4PB.
 428 */
 429#define PSS_SHIFT 12
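
/*
 * Worked example (editor's illustration, not from the original source):
 * with a 4 KiB page mapped by three processes, smaps_pte_entry() below
 * accumulates
 *
 *      (4096 << PSS_SHIFT) / 3 = 5592405    fixed-point byte units
 *
 * per sharer, and show_smap() reports mss.pss >> (10 + PSS_SHIFT), i.e.
 * the accumulated total converted back to whole kilobytes.  Each sharer
 * is thus charged roughly a third of the page (~1.33 kB before the final
 * truncation to kB).
 */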
 430
 431#ifdef CONFIG_PROC_PAGE_MONITOR
 432struct mem_size_stats {
 433        struct vm_area_struct *vma;
 434        unsigned long resident;
 435        unsigned long shared_clean;
 436        unsigned long shared_dirty;
 437        unsigned long private_clean;
 438        unsigned long private_dirty;
 439        unsigned long referenced;
 440        unsigned long anonymous;
 441        unsigned long anonymous_thp;
 442        unsigned long swap;
 443        unsigned long nonlinear;
 444        u64 pss;
 445};
 446
 447
 448static void smaps_pte_entry(pte_t ptent, unsigned long addr,
 449                unsigned long ptent_size, struct mm_walk *walk)
 450{
 451        struct mem_size_stats *mss = walk->private;
 452        struct vm_area_struct *vma = mss->vma;
 453        pgoff_t pgoff = linear_page_index(vma, addr);
 454        struct page *page = NULL;
 455        int mapcount;
 456
 457        if (pte_present(ptent)) {
 458                page = vm_normal_page(vma, addr, ptent);
 459        } else if (is_swap_pte(ptent)) {
 460                swp_entry_t swpent = pte_to_swp_entry(ptent);
 461
 462                if (!non_swap_entry(swpent))
 463                        mss->swap += ptent_size;
 464                else if (is_migration_entry(swpent))
 465                        page = migration_entry_to_page(swpent);
 466        } else if (pte_file(ptent)) {
 467                if (pte_to_pgoff(ptent) != pgoff)
 468                        mss->nonlinear += ptent_size;
 469        }
 470
 471        if (!page)
 472                return;
 473
 474        if (PageAnon(page))
 475                mss->anonymous += ptent_size;
 476
 477        if (page->index != pgoff)
 478                mss->nonlinear += ptent_size;
 479
 480        mss->resident += ptent_size;
 481        /* Accumulate the size in pages that have been accessed. */
 482        if (pte_young(ptent) || PageReferenced(page))
 483                mss->referenced += ptent_size;
 484        mapcount = page_mapcount(page);
 485        if (mapcount >= 2) {
 486                if (pte_dirty(ptent) || PageDirty(page))
 487                        mss->shared_dirty += ptent_size;
 488                else
 489                        mss->shared_clean += ptent_size;
 490                mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
 491        } else {
 492                if (pte_dirty(ptent) || PageDirty(page))
 493                        mss->private_dirty += ptent_size;
 494                else
 495                        mss->private_clean += ptent_size;
 496                mss->pss += (ptent_size << PSS_SHIFT);
 497        }
 498}
 499
 500static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 501                           struct mm_walk *walk)
 502{
 503        struct mem_size_stats *mss = walk->private;
 504        struct vm_area_struct *vma = mss->vma;
 505        pte_t *pte;
 506        spinlock_t *ptl;
 507
 508        if (pmd_trans_huge_lock(pmd, vma) == 1) {
 509                smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
 510                spin_unlock(&walk->mm->page_table_lock);
 511                mss->anonymous_thp += HPAGE_PMD_SIZE;
 512                return 0;
 513        }
 514
 515        if (pmd_trans_unstable(pmd))
 516                return 0;
 517        /*
 518         * The mmap_sem held all the way back in m_start() is what
 519         * keeps khugepaged out of here and from collapsing things
 520         * in here.
 521         */
 522        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 523        for (; addr != end; pte++, addr += PAGE_SIZE)
 524                smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
 525        pte_unmap_unlock(pte - 1, ptl);
 526        cond_resched();
 527        return 0;
 528}
 529
 530static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 531{
 532        /*
 533         * Don't forget to update Documentation/ on changes.
 534         */
 535        static const char mnemonics[BITS_PER_LONG][2] = {
 536                /*
 537                 * In case we meet a flag we don't know about.
 538                 */
 539                [0 ... (BITS_PER_LONG-1)] = "??",
 540
 541                [ilog2(VM_READ)]        = "rd",
 542                [ilog2(VM_WRITE)]       = "wr",
 543                [ilog2(VM_EXEC)]        = "ex",
 544                [ilog2(VM_SHARED)]      = "sh",
 545                [ilog2(VM_MAYREAD)]     = "mr",
 546                [ilog2(VM_MAYWRITE)]    = "mw",
 547                [ilog2(VM_MAYEXEC)]     = "me",
 548                [ilog2(VM_MAYSHARE)]    = "ms",
 549                [ilog2(VM_GROWSDOWN)]   = "gd",
 550                [ilog2(VM_PFNMAP)]      = "pf",
 551                [ilog2(VM_DENYWRITE)]   = "dw",
 552                [ilog2(VM_LOCKED)]      = "lo",
 553                [ilog2(VM_IO)]          = "io",
 554                [ilog2(VM_SEQ_READ)]    = "sr",
 555                [ilog2(VM_RAND_READ)]   = "rr",
 556                [ilog2(VM_DONTCOPY)]    = "dc",
 557                [ilog2(VM_DONTEXPAND)]  = "de",
 558                [ilog2(VM_ACCOUNT)]     = "ac",
 559                [ilog2(VM_NORESERVE)]   = "nr",
 560                [ilog2(VM_HUGETLB)]     = "ht",
 561                [ilog2(VM_NONLINEAR)]   = "nl",
 562                [ilog2(VM_ARCH_1)]      = "ar",
 563                [ilog2(VM_DONTDUMP)]    = "dd",
 564                [ilog2(VM_MIXEDMAP)]    = "mm",
 565                [ilog2(VM_HUGEPAGE)]    = "hg",
 566                [ilog2(VM_NOHUGEPAGE)]  = "nh",
 567                [ilog2(VM_MERGEABLE)]   = "mg",
 568        };
 569        size_t i;
 570
 571        seq_puts(m, "VmFlags: ");
 572        for (i = 0; i < BITS_PER_LONG; i++) {
 573                if (vma->vm_flags & (1UL << i)) {
 574                        seq_printf(m, "%c%c ",
 575                                   mnemonics[i][0], mnemonics[i][1]);
 576                }
 577        }
 578        seq_putc(m, '\n');
 579}
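
/*
 * Example output (editor's illustration): for a typical private anonymous
 * read-write mapping the function above emits a line such as
 *
 *      VmFlags: rd wr mr mw me ac
 *
 * i.e. the two-letter mnemonic of every bit set in vma->vm_flags.
 */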
 580
 581static int show_smap(struct seq_file *m, void *v, int is_pid)
 582{
 583        struct proc_maps_private *priv = m->private;
 584        struct task_struct *task = priv->task;
 585        struct vm_area_struct *vma = v;
 586        struct mem_size_stats mss;
 587        struct mm_walk smaps_walk = {
 588                .pmd_entry = smaps_pte_range,
 589                .mm = vma->vm_mm,
 590                .private = &mss,
 591        };
 592
 593        memset(&mss, 0, sizeof mss);
 594        mss.vma = vma;
 595        /* mmap_sem is held in m_start */
 596        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
 597                walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
 598
 599        show_map_vma(m, vma, is_pid);
 600
 601        seq_printf(m,
 602                   "Size:           %8lu kB\n"
 603                   "Rss:            %8lu kB\n"
 604                   "Pss:            %8lu kB\n"
 605                   "Shared_Clean:   %8lu kB\n"
 606                   "Shared_Dirty:   %8lu kB\n"
 607                   "Private_Clean:  %8lu kB\n"
 608                   "Private_Dirty:  %8lu kB\n"
 609                   "Referenced:     %8lu kB\n"
 610                   "Anonymous:      %8lu kB\n"
 611                   "AnonHugePages:  %8lu kB\n"
 612                   "Swap:           %8lu kB\n"
 613                   "KernelPageSize: %8lu kB\n"
 614                   "MMUPageSize:    %8lu kB\n"
 615                   "Locked:         %8lu kB\n",
 616                   (vma->vm_end - vma->vm_start) >> 10,
 617                   mss.resident >> 10,
 618                   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
 619                   mss.shared_clean  >> 10,
 620                   mss.shared_dirty  >> 10,
 621                   mss.private_clean >> 10,
 622                   mss.private_dirty >> 10,
 623                   mss.referenced >> 10,
 624                   mss.anonymous >> 10,
 625                   mss.anonymous_thp >> 10,
 626                   mss.swap >> 10,
 627                   vma_kernel_pagesize(vma) >> 10,
 628                   vma_mmu_pagesize(vma) >> 10,
 629                   (vma->vm_flags & VM_LOCKED) ?
 630                        (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 631
 632        if (vma->vm_flags & VM_NONLINEAR)
 633                seq_printf(m, "Nonlinear:      %8lu kB\n",
 634                                mss.nonlinear >> 10);
 635
 636        show_smap_vma_flags(m, vma);
 637
 638        if (m->count < m->size)  /* vma is copied successfully */
 639                m->version = (vma != get_gate_vma(task->mm))
 640                        ? vma->vm_start : 0;
 641        return 0;
 642}
 643
 644static int show_pid_smap(struct seq_file *m, void *v)
 645{
 646        return show_smap(m, v, 1);
 647}
 648
 649static int show_tid_smap(struct seq_file *m, void *v)
 650{
 651        return show_smap(m, v, 0);
 652}
 653
 654static const struct seq_operations proc_pid_smaps_op = {
 655        .start  = m_start,
 656        .next   = m_next,
 657        .stop   = m_stop,
 658        .show   = show_pid_smap
 659};
 660
 661static const struct seq_operations proc_tid_smaps_op = {
 662        .start  = m_start,
 663        .next   = m_next,
 664        .stop   = m_stop,
 665        .show   = show_tid_smap
 666};
 667
 668static int pid_smaps_open(struct inode *inode, struct file *file)
 669{
 670        return do_maps_open(inode, file, &proc_pid_smaps_op);
 671}
 672
 673static int tid_smaps_open(struct inode *inode, struct file *file)
 674{
 675        return do_maps_open(inode, file, &proc_tid_smaps_op);
 676}
 677
 678const struct file_operations proc_pid_smaps_operations = {
 679        .open           = pid_smaps_open,
 680        .read           = seq_read,
 681        .llseek         = seq_lseek,
 682        .release        = seq_release_private,
 683};
 684
 685const struct file_operations proc_tid_smaps_operations = {
 686        .open           = tid_smaps_open,
 687        .read           = seq_read,
 688        .llseek         = seq_lseek,
 689        .release        = seq_release_private,
 690};
 691
 692/*
 693 * We do not want to have constant page-shift bits sitting in
 694 * pagemap entries and are about to reuse them some time soon.
 695 *
 696 * Here's the "migration strategy":
 697 * 1. when the system boots these bits remain what they are,
 698 *    but a warning about the future change is printed to the log;
 699 * 2. once anyone clears soft-dirty bits via the clear_refs file,
 700 *    this flag is set to denote that the user is aware of the
 701 *    new API and those page-shift bits change their meaning.
 702 *    The respective warning is printed in dmesg;
 703 * 3. in a couple of releases we will remove all mentions
 704 *    of page-shift in pagemap entries.
 705 */
 706
 707static bool soft_dirty_cleared __read_mostly;
 708
 709enum clear_refs_types {
 710        CLEAR_REFS_ALL = 1,
 711        CLEAR_REFS_ANON,
 712        CLEAR_REFS_MAPPED,
 713        CLEAR_REFS_SOFT_DIRTY,
 714        CLEAR_REFS_LAST,
 715};
 716
 717struct clear_refs_private {
 718        struct vm_area_struct *vma;
 719        enum clear_refs_types type;
 720};
 721
 722static inline void clear_soft_dirty(struct vm_area_struct *vma,
 723                unsigned long addr, pte_t *pte)
 724{
 725#ifdef CONFIG_MEM_SOFT_DIRTY
 726        /*
 727         * The soft-dirty tracker uses page faults (#PF) to catch writes
 728         * to pages, so write-protect the pte as well. See
 729         * Documentation/vm/soft-dirty.txt for a full description
 730         * of how soft-dirty works.
 731         */
 732        pte_t ptent = *pte;
 733
 734        if (pte_present(ptent)) {
 735                ptent = pte_wrprotect(ptent);
 736                ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
 737        } else if (is_swap_pte(ptent)) {
 738                ptent = pte_swp_clear_soft_dirty(ptent);
 739        } else if (pte_file(ptent)) {
 740                ptent = pte_file_clear_soft_dirty(ptent);
 741        }
 742
 743        if (vma->vm_flags & VM_SOFTDIRTY)
 744                vma->vm_flags &= ~VM_SOFTDIRTY;
 745
 746        set_pte_at(vma->vm_mm, addr, pte, ptent);
 747#endif
 748}
 749
 750static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 751                                unsigned long end, struct mm_walk *walk)
 752{
 753        struct clear_refs_private *cp = walk->private;
 754        struct vm_area_struct *vma = cp->vma;
 755        pte_t *pte, ptent;
 756        spinlock_t *ptl;
 757        struct page *page;
 758
 759        split_huge_page_pmd(vma, addr, pmd);
 760        if (pmd_trans_unstable(pmd))
 761                return 0;
 762
 763        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 764        for (; addr != end; pte++, addr += PAGE_SIZE) {
 765                ptent = *pte;
 766
 767                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 768                        clear_soft_dirty(vma, addr, pte);
 769                        continue;
 770                }
 771
 772                if (!pte_present(ptent))
 773                        continue;
 774
 775                page = vm_normal_page(vma, addr, ptent);
 776                if (!page)
 777                        continue;
 778
 779                /* Clear accessed and referenced bits. */
 780                ptep_test_and_clear_young(vma, addr, pte);
 781                ClearPageReferenced(page);
 782        }
 783        pte_unmap_unlock(pte - 1, ptl);
 784        cond_resched();
 785        return 0;
 786}
 787
 788static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 789                                size_t count, loff_t *ppos)
 790{
 791        struct task_struct *task;
 792        char buffer[PROC_NUMBUF];
 793        struct mm_struct *mm;
 794        struct vm_area_struct *vma;
 795        enum clear_refs_types type;
 796        int itype;
 797        int rv;
 798
 799        memset(buffer, 0, sizeof(buffer));
 800        if (count > sizeof(buffer) - 1)
 801                count = sizeof(buffer) - 1;
 802        if (copy_from_user(buffer, buf, count))
 803                return -EFAULT;
 804        rv = kstrtoint(strstrip(buffer), 10, &itype);
 805        if (rv < 0)
 806                return rv;
 807        type = (enum clear_refs_types)itype;
 808        if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
 809                return -EINVAL;
 810
 811        if (type == CLEAR_REFS_SOFT_DIRTY) {
 812                soft_dirty_cleared = true;
 813                pr_warn_once("The pagemap bits 55-60 have changed their meaning! "
 814                                "See the linux/Documentation/vm/pagemap.txt for details.\n");
 815        }
 816
 817        task = get_proc_task(file_inode(file));
 818        if (!task)
 819                return -ESRCH;
 820        mm = get_task_mm(task);
 821        if (mm) {
 822                struct clear_refs_private cp = {
 823                        .type = type,
 824                };
 825                struct mm_walk clear_refs_walk = {
 826                        .pmd_entry = clear_refs_pte_range,
 827                        .mm = mm,
 828                        .private = &cp,
 829                };
 830                down_read(&mm->mmap_sem);
 831                if (type == CLEAR_REFS_SOFT_DIRTY)
 832                        mmu_notifier_invalidate_range_start(mm, 0, -1);
 833                for (vma = mm->mmap; vma; vma = vma->vm_next) {
 834                        cp.vma = vma;
 835                        if (is_vm_hugetlb_page(vma))
 836                                continue;
 837                        /*
 838                         * Writing 1 to /proc/pid/clear_refs affects all pages.
 839                         *
 840                         * Writing 2 to /proc/pid/clear_refs only affects
 841                         * anonymous pages.
 842                         *
 843                         * Writing 3 to /proc/pid/clear_refs only affects
 844                         * file-mapped pages.  (A usage sketch follows this function.)
 845                         */
 846                        if (type == CLEAR_REFS_ANON && vma->vm_file)
 847                                continue;
 848                        if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
 849                                continue;
 850                        walk_page_range(vma->vm_start, vma->vm_end,
 851                                        &clear_refs_walk);
 852                }
 853                if (type == CLEAR_REFS_SOFT_DIRTY)
 854                        mmu_notifier_invalidate_range_end(mm, 0, -1);
 855                flush_tlb_mm(mm);
 856                up_read(&mm->mmap_sem);
 857                mmput(mm);
 858        }
 859        put_task_struct(task);
 860
 861        return count;
 862}
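
/*
 * Usage sketch (editor's illustration): driving this file from userspace.
 *
 *      # reset the referenced/accessed information for all pages,
 *      # run the workload, then read the Referenced: lines in smaps
 *      echo 1 > /proc/$pid/clear_refs
 *
 *      # start a new soft-dirty tracking pass (see clear_soft_dirty() above)
 *      echo 4 > /proc/$pid/clear_refs
 */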
 863
 864const struct file_operations proc_clear_refs_operations = {
 865        .write          = clear_refs_write,
 866        .llseek         = noop_llseek,
 867};
 868
 869typedef struct {
 870        u64 pme;
 871} pagemap_entry_t;
 872
 873struct pagemapread {
 874        int pos, len;           /* units: PM_ENTRY_BYTES, not bytes */
 875        pagemap_entry_t *buffer;
 876        bool v2;
 877};
 878
 879#define PAGEMAP_WALK_SIZE       (PMD_SIZE)
 880#define PAGEMAP_WALK_MASK       (PMD_MASK)
 881
 882#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
 883#define PM_STATUS_BITS      3
 884#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
 885#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
 886#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
 887#define PM_PSHIFT_BITS      6
 888#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
 889#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
 890#define __PM_PSHIFT(x)      (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
 891#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
 892#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
 893/* in the "new" pagemap the pshift bits are occupied by more status bits */
 894#define PM_STATUS2(v2, x)   (__PM_PSHIFT(v2 ? x : PAGE_SHIFT))
 895
 896#define __PM_SOFT_DIRTY      (1LL)
 897#define PM_PRESENT          PM_STATUS(4LL)
 898#define PM_SWAP             PM_STATUS(2LL)
 899#define PM_FILE             PM_STATUS(1LL)
 900#define PM_NOT_PRESENT(v2)  PM_STATUS2(v2, 0)
 901#define PM_END_OF_BUFFER    1
 902
 903static inline pagemap_entry_t make_pme(u64 val)
 904{
 905        return (pagemap_entry_t) { .pme = val };
 906}
 907
 908static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
 909                          struct pagemapread *pm)
 910{
 911        pm->buffer[pm->pos++] = *pme;
 912        if (pm->pos >= pm->len)
 913                return PM_END_OF_BUFFER;
 914        return 0;
 915}
 916
 917static int pagemap_pte_hole(unsigned long start, unsigned long end,
 918                                struct mm_walk *walk)
 919{
 920        struct pagemapread *pm = walk->private;
 921        unsigned long addr;
 922        int err = 0;
 923        pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
 924
 925        for (addr = start; addr < end; addr += PAGE_SIZE) {
 926                err = add_to_pagemap(addr, &pme, pm);
 927                if (err)
 928                        break;
 929        }
 930        return err;
 931}
 932
 933static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 934                struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 935{
 936        u64 frame, flags;
 937        struct page *page = NULL;
 938        int flags2 = 0;
 939
 940        if (pte_present(pte)) {
 941                frame = pte_pfn(pte);
 942                flags = PM_PRESENT;
 943                page = vm_normal_page(vma, addr, pte);
 944                if (pte_soft_dirty(pte))
 945                        flags2 |= __PM_SOFT_DIRTY;
 946        } else if (is_swap_pte(pte)) {
 947                swp_entry_t entry;
 948                if (pte_swp_soft_dirty(pte))
 949                        flags2 |= __PM_SOFT_DIRTY;
 950                entry = pte_to_swp_entry(pte);
 951                frame = swp_type(entry) |
 952                        (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
 953                flags = PM_SWAP;
 954                if (is_migration_entry(entry))
 955                        page = migration_entry_to_page(entry);
 956        } else {
 957                if (vma->vm_flags & VM_SOFTDIRTY)
 958                        flags2 |= __PM_SOFT_DIRTY;
 959                *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
 960                return;
 961        }
 962
 963        if (page && !PageAnon(page))
 964                flags |= PM_FILE;
 965        if ((vma->vm_flags & VM_SOFTDIRTY))
 966                flags2 |= __PM_SOFT_DIRTY;
 967
 968        *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
 969}
 970
 971#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 972static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 973                pmd_t pmd, int offset, int pmd_flags2)
 974{
 975        /*
 976         * Currently a pmd for thp is always present because thp cannot be
 977         * swapped out, migrated, or HWPOISONed (it is split in such cases
 978         * instead).  This if-check is just to prepare for a future implementation.
 979         */
 980        if (pmd_present(pmd))
 981                *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
 982                                | PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
 983        else
 984                *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
 985}
 986#else
 987static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 988                pmd_t pmd, int offset, int pmd_flags2)
 989{
 990}
 991#endif
 992
 993static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 994                             struct mm_walk *walk)
 995{
 996        struct vm_area_struct *vma;
 997        struct pagemapread *pm = walk->private;
 998        pte_t *pte;
 999        int err = 0;
1000        pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
1001
1002        /* find the first VMA at or above 'addr' */
1003        vma = find_vma(walk->mm, addr);
1004        if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
1005                int pmd_flags2;
1006
1007                if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
1008                        pmd_flags2 = __PM_SOFT_DIRTY;
1009                else
1010                        pmd_flags2 = 0;
1011
1012                for (; addr != end; addr += PAGE_SIZE) {
1013                        unsigned long offset;
1014
1015                        offset = (addr & ~PAGEMAP_WALK_MASK) >>
1016                                        PAGE_SHIFT;
1017                        thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2);
1018                        err = add_to_pagemap(addr, &pme, pm);
1019                        if (err)
1020                                break;
1021                }
1022                spin_unlock(&walk->mm->page_table_lock);
1023                return err;
1024        }
1025
1026        if (pmd_trans_unstable(pmd))
1027                return 0;
1028        for (; addr != end; addr += PAGE_SIZE) {
1029                int flags2;
1030
1031                /* check to see if we've left 'vma' behind
1032                 * and need a new, higher one */
1033                if (vma && (addr >= vma->vm_end)) {
1034                        vma = find_vma(walk->mm, addr);
1035                        if (vma && (vma->vm_flags & VM_SOFTDIRTY))
1036                                flags2 = __PM_SOFT_DIRTY;
1037                        else
1038                                flags2 = 0;
1039                        pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
1040                }
1041
1042                /* check that 'vma' actually covers this address,
1043                 * and that it isn't a huge page vma */
1044                if (vma && (vma->vm_start <= addr) &&
1045                    !is_vm_hugetlb_page(vma)) {
1046                        pte = pte_offset_map(pmd, addr);
1047                        pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
1048                        /* unmap before userspace copy */
1049                        pte_unmap(pte);
1050                }
1051                err = add_to_pagemap(addr, &pme, pm);
1052                if (err)
1053                        return err;
1054        }
1055
1056        cond_resched();
1057
1058        return err;
1059}
1060
1061#ifdef CONFIG_HUGETLB_PAGE
1062static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
1063                                        pte_t pte, int offset, int flags2)
1064{
1065        if (pte_present(pte))
1066                *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)        |
1067                                PM_STATUS2(pm->v2, flags2)              |
1068                                PM_PRESENT);
1069        else
1070                *pme = make_pme(PM_NOT_PRESENT(pm->v2)                  |
1071                                PM_STATUS2(pm->v2, flags2));
1072}
1073
1074/* This function walks within one hugetlb entry in a single call */
1075static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
1076                                 unsigned long addr, unsigned long end,
1077                                 struct mm_walk *walk)
1078{
1079        struct pagemapread *pm = walk->private;
1080        struct vm_area_struct *vma;
1081        int err = 0;
1082        int flags2;
1083        pagemap_entry_t pme;
1084
1085        vma = find_vma(walk->mm, addr);
1086        WARN_ON_ONCE(!vma);
1087
1088        if (vma && (vma->vm_flags & VM_SOFTDIRTY))
1089                flags2 = __PM_SOFT_DIRTY;
1090        else
1091                flags2 = 0;
1092
1093        for (; addr != end; addr += PAGE_SIZE) {
1094                int offset = (addr & ~hmask) >> PAGE_SHIFT;
1095                huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
1096                err = add_to_pagemap(addr, &pme, pm);
1097                if (err)
1098                        return err;
1099        }
1100
1101        cond_resched();
1102
1103        return err;
1104}
1105#endif /* CONFIG_HUGETLB_PAGE */
1106
1107/*
1108 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1109 *
1110 * For each page in the address space, this file contains one 64-bit entry
1111 * consisting of the following:
1112 *
1113 * Bits 0-54  page frame number (PFN) if present
1114 * Bits 0-4   swap type if swapped
1115 * Bits 5-54  swap offset if swapped
1116 * Bits 55-60 page shift (page size = 1<<page shift)
1117 * Bit  61    page is file-page or shared-anon
1118 * Bit  62    page swapped
1119 * Bit  63    page present
1120 *
1121 * If the page is not present but in swap, then the PFN contains an
1122 * encoding of the swap file number and the page's offset into the
1123 * swap. Unmapped pages return a null PFN. This allows determining
1124 * precisely which pages are mapped (or in swap) and comparing mapped
1125 * pages between processes.
1126 *
1127 * Efficient users of this interface will use /proc/pid/maps to
1128 * determine which areas of memory are actually mapped and llseek to
1129 * skip over unmapped regions.
1130 */
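
/*
 * Userspace sketch (editor's illustration, assuming a 4 KiB page size and
 * the bit layout documented above): look up one virtual address by seeking
 * to (vaddr / page_size) * 8 in /proc/PID/pagemap and decoding the entry.
 *
 *      uint64_t entry, pfn, swap_type;
 *      int fd = open("/proc/self/pagemap", O_RDONLY);
 *      off_t off = (vaddr / 4096) * sizeof(entry);
 *
 *      if (pread(fd, &entry, sizeof(entry), off) == sizeof(entry)) {
 *              if (entry & (1ULL << 63))               // bit 63: present
 *                      pfn = entry & ((1ULL << 55) - 1);       // bits 0-54
 *              else if (entry & (1ULL << 62))          // bit 62: swapped
 *                      swap_type = entry & 0x1f;               // bits 0-4
 *      }
 */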
1131static ssize_t pagemap_read(struct file *file, char __user *buf,
1132                            size_t count, loff_t *ppos)
1133{
1134        struct task_struct *task = get_proc_task(file_inode(file));
1135        struct mm_struct *mm;
1136        struct pagemapread pm;
1137        int ret = -ESRCH;
1138        struct mm_walk pagemap_walk = {};
1139        unsigned long src;
1140        unsigned long svpfn;
1141        unsigned long start_vaddr;
1142        unsigned long end_vaddr;
1143        int copied = 0;
1144
1145        if (!task)
1146                goto out;
1147
1148        ret = -EINVAL;
1149        /* file position must be aligned */
1150        if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1151                goto out_task;
1152
1153        ret = 0;
1154        if (!count)
1155                goto out_task;
1156
1157        pm.v2 = soft_dirty_cleared;
1158        pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1159        pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
1160        ret = -ENOMEM;
1161        if (!pm.buffer)
1162                goto out_task;
1163
1164        mm = mm_access(task, PTRACE_MODE_READ);
1165        ret = PTR_ERR(mm);
1166        if (!mm || IS_ERR(mm))
1167                goto out_free;
1168
1169        pagemap_walk.pmd_entry = pagemap_pte_range;
1170        pagemap_walk.pte_hole = pagemap_pte_hole;
1171#ifdef CONFIG_HUGETLB_PAGE
1172        pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
1173#endif
1174        pagemap_walk.mm = mm;
1175        pagemap_walk.private = &pm;
1176
1177        src = *ppos;
1178        svpfn = src / PM_ENTRY_BYTES;
1179        start_vaddr = svpfn << PAGE_SHIFT;
1180        end_vaddr = TASK_SIZE_OF(task);
1181
1182        /* watch out for wraparound */
1183        if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
1184                start_vaddr = end_vaddr;
1185
1186        /*
1187         * The odds are that this will stop walking way
1188         * before end_vaddr, because the length of the
1189         * user buffer is tracked in "pm", and the walk
1190         * will stop when we hit the end of the buffer.
1191         */
1192        ret = 0;
1193        while (count && (start_vaddr < end_vaddr)) {
1194                int len;
1195                unsigned long end;
1196
1197                pm.pos = 0;
1198                end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1199                /* overflow ? */
1200                if (end < start_vaddr || end > end_vaddr)
1201                        end = end_vaddr;
1202                down_read(&mm->mmap_sem);
1203                ret = walk_page_range(start_vaddr, end, &pagemap_walk);
1204                up_read(&mm->mmap_sem);
1205                start_vaddr = end;
1206
1207                len = min(count, PM_ENTRY_BYTES * pm.pos);
1208                if (copy_to_user(buf, pm.buffer, len)) {
1209                        ret = -EFAULT;
1210                        goto out_mm;
1211                }
1212                copied += len;
1213                buf += len;
1214                count -= len;
1215        }
1216        *ppos += copied;
1217        if (!ret || ret == PM_END_OF_BUFFER)
1218                ret = copied;
1219
1220out_mm:
1221        mmput(mm);
1222out_free:
1223        kfree(pm.buffer);
1224out_task:
1225        put_task_struct(task);
1226out:
1227        return ret;
1228}
1229
1230static int pagemap_open(struct inode *inode, struct file *file)
1231{
1232        pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries will stop "
1233                        "being page-shift some time soon. See the "
1234                        "linux/Documentation/vm/pagemap.txt for details.\n");
1235        return 0;
1236}
1237
1238const struct file_operations proc_pagemap_operations = {
1239        .llseek         = mem_lseek, /* borrow this */
1240        .read           = pagemap_read,
1241        .open           = pagemap_open,
1242};
1243#endif /* CONFIG_PROC_PAGE_MONITOR */
1244
1245#ifdef CONFIG_NUMA
1246
1247struct numa_maps {
1248        struct vm_area_struct *vma;
1249        unsigned long pages;
1250        unsigned long anon;
1251        unsigned long active;
1252        unsigned long writeback;
1253        unsigned long mapcount_max;
1254        unsigned long dirty;
1255        unsigned long swapcache;
1256        unsigned long node[MAX_NUMNODES];
1257};
1258
1259struct numa_maps_private {
1260        struct proc_maps_private proc_maps;
1261        struct numa_maps md;
1262};
1263
1264static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1265                        unsigned long nr_pages)
1266{
1267        int count = page_mapcount(page);
1268
1269        md->pages += nr_pages;
1270        if (pte_dirty || PageDirty(page))
1271                md->dirty += nr_pages;
1272
1273        if (PageSwapCache(page))
1274                md->swapcache += nr_pages;
1275
1276        if (PageActive(page) || PageUnevictable(page))
1277                md->active += nr_pages;
1278
1279        if (PageWriteback(page))
1280                md->writeback += nr_pages;
1281
1282        if (PageAnon(page))
1283                md->anon += nr_pages;
1284
1285        if (count > md->mapcount_max)
1286                md->mapcount_max = count;
1287
1288        md->node[page_to_nid(page)] += nr_pages;
1289}
1290
1291static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1292                unsigned long addr)
1293{
1294        struct page *page;
1295        int nid;
1296
1297        if (!pte_present(pte))
1298                return NULL;
1299
1300        page = vm_normal_page(vma, addr, pte);
1301        if (!page)
1302                return NULL;
1303
1304        if (PageReserved(page))
1305                return NULL;
1306
1307        nid = page_to_nid(page);
1308        if (!node_isset(nid, node_states[N_MEMORY]))
1309                return NULL;
1310
1311        return page;
1312}
1313
1314static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1315                unsigned long end, struct mm_walk *walk)
1316{
1317        struct numa_maps *md;
1318        spinlock_t *ptl;
1319        pte_t *orig_pte;
1320        pte_t *pte;
1321
1322        md = walk->private;
1323
1324        if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
1325                pte_t huge_pte = *(pte_t *)pmd;
1326                struct page *page;
1327
1328                page = can_gather_numa_stats(huge_pte, md->vma, addr);
1329                if (page)
1330                        gather_stats(page, md, pte_dirty(huge_pte),
1331                                     HPAGE_PMD_SIZE/PAGE_SIZE);
1332                spin_unlock(&walk->mm->page_table_lock);
1333                return 0;
1334        }
1335
1336        if (pmd_trans_unstable(pmd))
1337                return 0;
1338        orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1339        do {
1340                struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
1341                if (!page)
1342                        continue;
1343                gather_stats(page, md, pte_dirty(*pte), 1);
1344
1345        } while (pte++, addr += PAGE_SIZE, addr != end);
1346        pte_unmap_unlock(orig_pte, ptl);
1347        return 0;
1348}
1349#ifdef CONFIG_HUGETLB_PAGE
1350static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
1351                unsigned long addr, unsigned long end, struct mm_walk *walk)
1352{
1353        struct numa_maps *md;
1354        struct page *page;
1355
1356        if (pte_none(*pte))
1357                return 0;
1358
1359        page = pte_page(*pte);
1360        if (!page)
1361                return 0;
1362
1363        md = walk->private;
1364        gather_stats(page, md, pte_dirty(*pte), 1);
1365        return 0;
1366}
1367
1368#else
1369static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
1370                unsigned long addr, unsigned long end, struct mm_walk *walk)
1371{
1372        return 0;
1373}
1374#endif
1375
1376/*
1377 * Display pages allocated per node and memory policy via /proc.
1378 */
1379static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1380{
1381        struct numa_maps_private *numa_priv = m->private;
1382        struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1383        struct vm_area_struct *vma = v;
1384        struct numa_maps *md = &numa_priv->md;
1385        struct file *file = vma->vm_file;
1386        struct task_struct *task = proc_priv->task;
1387        struct mm_struct *mm = vma->vm_mm;
1388        struct mm_walk walk = {};
1389        struct mempolicy *pol;
1390        int n;
1391        char buffer[50];
1392
1393        if (!mm)
1394                return 0;
1395
1396        /* Ensure we start with an empty set of numa_maps statistics. */
1397        memset(md, 0, sizeof(*md));
1398
1399        md->vma = vma;
1400
1401        walk.hugetlb_entry = gather_hugetbl_stats;
1402        walk.pmd_entry = gather_pte_stats;
1403        walk.private = md;
1404        walk.mm = mm;
1405
1406        pol = get_vma_policy(task, vma, vma->vm_start);
1407        n = mpol_to_str(buffer, sizeof(buffer), pol);
1408        mpol_cond_put(pol);
1409        if (n < 0)
1410                return n;
1411
1412        seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1413
1414        if (file) {
1415                seq_printf(m, " file=");
1416                seq_path(m, &file->f_path, "\n\t= ");
1417        } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1418                seq_printf(m, " heap");
1419        } else {
1420                pid_t tid = vm_is_stack(task, vma, is_pid);
1421                if (tid != 0) {
1422                        /*
1423                         * Thread stack in /proc/PID/task/TID/maps or
1424                         * the main process stack.
1425                         */
1426                        if (!is_pid || (vma->vm_start <= mm->start_stack &&
1427                            vma->vm_end >= mm->start_stack))
1428                                seq_printf(m, " stack");
1429                        else
1430                                seq_printf(m, " stack:%d", tid);
1431                }
1432        }
1433
1434        if (is_vm_hugetlb_page(vma))
1435                seq_printf(m, " huge");
1436
1437        walk_page_range(vma->vm_start, vma->vm_end, &walk);
1438
1439        if (!md->pages)
1440                goto out;
1441
1442        if (md->anon)
1443                seq_printf(m, " anon=%lu", md->anon);
1444
1445        if (md->dirty)
1446                seq_printf(m, " dirty=%lu", md->dirty);
1447
1448        if (md->pages != md->anon && md->pages != md->dirty)
1449                seq_printf(m, " mapped=%lu", md->pages);
1450
1451        if (md->mapcount_max > 1)
1452                seq_printf(m, " mapmax=%lu", md->mapcount_max);
1453
1454        if (md->swapcache)
1455                seq_printf(m, " swapcache=%lu", md->swapcache);
1456
1457        if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1458                seq_printf(m, " active=%lu", md->active);
1459
1460        if (md->writeback)
1461                seq_printf(m, " writeback=%lu", md->writeback);
1462
1463        for_each_node_state(n, N_MEMORY)
1464                if (md->node[n])
1465                        seq_printf(m, " N%d=%lu", n, md->node[n]);
1466out:
1467        seq_putc(m, '\n');
1468
1469        if (m->count < m->size)
1470                m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
1471        return 0;
1472}
1473
1474static int show_pid_numa_map(struct seq_file *m, void *v)
1475{
1476        return show_numa_map(m, v, 1);
1477}
1478
1479static int show_tid_numa_map(struct seq_file *m, void *v)
1480{
1481        return show_numa_map(m, v, 0);
1482}
1483
1484static const struct seq_operations proc_pid_numa_maps_op = {
1485        .start  = m_start,
1486        .next   = m_next,
1487        .stop   = m_stop,
1488        .show   = show_pid_numa_map,
1489};
1490
1491static const struct seq_operations proc_tid_numa_maps_op = {
1492        .start  = m_start,
1493        .next   = m_next,
1494        .stop   = m_stop,
1495        .show   = show_tid_numa_map,
1496};
1497
1498static int numa_maps_open(struct inode *inode, struct file *file,
1499                          const struct seq_operations *ops)
1500{
1501        struct numa_maps_private *priv;
1502        int ret = -ENOMEM;
1503        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1504        if (priv) {
1505                priv->proc_maps.pid = proc_pid(inode);
1506                ret = seq_open(file, ops);
1507                if (!ret) {
1508                        struct seq_file *m = file->private_data;
1509                        m->private = priv;
1510                } else {
1511                        kfree(priv);
1512                }
1513        }
1514        return ret;
1515}
1516
1517static int pid_numa_maps_open(struct inode *inode, struct file *file)
1518{
1519        return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
1520}
1521
1522static int tid_numa_maps_open(struct inode *inode, struct file *file)
1523{
1524        return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
1525}
1526
1527const struct file_operations proc_pid_numa_maps_operations = {
1528        .open           = pid_numa_maps_open,
1529        .read           = seq_read,
1530        .llseek         = seq_lseek,
1531        .release        = seq_release_private,
1532};
1533
1534const struct file_operations proc_tid_numa_maps_operations = {
1535        .open           = tid_numa_maps_open,
1536        .read           = seq_read,
1537        .llseek         = seq_lseek,
1538        .release        = seq_release_private,
1539};
1540#endif /* CONFIG_NUMA */
1541