linux/fs/proc/task_nommu.c

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared" and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        struct vm_region *region;
        struct rb_node *p;
        unsigned long bytes = 0, sbytes = 0, slack = 0, size;

        down_read(&mm->mmap_sem);
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
                vma = rb_entry(p, struct vm_area_struct, vm_rb);

                bytes += kobjsize(vma);

                region = vma->vm_region;
                if (region) {
                        size = kobjsize(region);
                        size += region->vm_end - region->vm_start;
                } else {
                        size = vma->vm_end - vma->vm_start;
                }

                if (atomic_read(&mm->mm_count) > 1 ||
                    vma->vm_flags & VM_MAYSHARE) {
                        sbytes += size;
                } else {
                        bytes += size;
                        if (region)
                                slack = region->vm_end - vma->vm_end;
                }
        }

        if (atomic_read(&mm->mm_count) > 1)
                sbytes += kobjsize(mm);
        else
                bytes += kobjsize(mm);

        if (current->fs && current->fs->users > 1)
                sbytes += kobjsize(current->fs);
        else
                bytes += kobjsize(current->fs);

        if (current->files && atomic_read(&current->files->count) > 1)
                sbytes += kobjsize(current->files);
        else
                bytes += kobjsize(current->files);

        if (current->sighand && atomic_read(&current->sighand->count) > 1)
                sbytes += kobjsize(current->sighand);
        else
                bytes += kobjsize(current->sighand);

        bytes += kobjsize(current); /* includes kernel stack */

        seq_printf(m,
                "Mem:\t%8lu bytes\n"
                "Slack:\t%8lu bytes\n"
                "Shared:\t%8lu bytes\n",
                bytes, slack, sbytes);

        up_read(&mm->mmap_sem);
}

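/*
 * sum the size of each VMA to give the task's total virtual address
 * space usage
 */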
unsigned long task_vsize(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        struct rb_node *p;
        unsigned long vsize = 0;

        down_read(&mm->mmap_sem);
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
                vma = rb_entry(p, struct vm_area_struct, vm_rb);
                vsize += vma->vm_end - vma->vm_start;
        }
        up_read(&mm->mmap_sem);
        return vsize;
}

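/*
 * derive the /proc/<pid>/statm figures: kernel object overhead plus
 * mapped region sizes, with text and data taken from the mm layout;
 * the resident figure is reported equal to the total size
 */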
unsigned long task_statm(struct mm_struct *mm,
                         unsigned long *shared, unsigned long *text,
                         unsigned long *data, unsigned long *resident)
{
        struct vm_area_struct *vma;
        struct vm_region *region;
        struct rb_node *p;
        unsigned long size = kobjsize(mm);

        down_read(&mm->mmap_sem);
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
                vma = rb_entry(p, struct vm_area_struct, vm_rb);
                size += kobjsize(vma);
                region = vma->vm_region;
                if (region) {
                        size += kobjsize(region);
                        size += region->vm_end - region->vm_start;
                }
        }

        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                >> PAGE_SHIFT;
        *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
                >> PAGE_SHIFT;
        up_read(&mm->mmap_sem);
        size >>= PAGE_SHIFT;
        size += *text + *data;
        *resident = size;
        return size;
}

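/*
 * does this VMA cover the stack start recorded in the mm?
 */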
static int is_stack(struct proc_maps_private *priv,
                    struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;

        /*
         * We make no effort to guess what a given thread considers to be
         * its "stack".  It's not even well-defined for programs written
         * in languages like Go.
         */
        return vma->vm_start <= mm->start_stack &&
                vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
                          int is_pid)
{
        struct mm_struct *mm = vma->vm_mm;
        struct proc_maps_private *priv = m->private;
        unsigned long ino = 0;
        struct file *file;
        dev_t dev = 0;
        int flags;
        unsigned long long pgoff = 0;

        flags = vma->vm_flags;
        file = vma->vm_file;

        if (file) {
                struct inode *inode = file_inode(vma->vm_file);
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
                pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
        }

        seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
        seq_printf(m,
                   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
                   vma->vm_start,
                   vma->vm_end,
                   flags & VM_READ ? 'r' : '-',
                   flags & VM_WRITE ? 'w' : '-',
                   flags & VM_EXEC ? 'x' : '-',
                   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
                   pgoff,
                   MAJOR(dev), MINOR(dev), ino);

        if (file) {
                seq_pad(m, ' ');
                seq_file_path(m, file, "");
        } else if (mm && is_stack(priv, vma)) {
                seq_pad(m, ' ');
                seq_printf(m, "[stack]");
        }

        seq_putc(m, '\n');
        return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p, int is_pid)
{
        struct rb_node *p = _p;

        return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb),
                              is_pid);
}

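/*
 * seq_file show callbacks for the per-process and per-thread views
 */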
static int show_pid_map(struct seq_file *m, void *_p)
{
        return show_map(m, _p, 1);
}

static int show_tid_map(struct seq_file *m, void *_p)
{
        return show_map(m, _p, 0);
}

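/*
 * seq_file start callback: pin the task and mm, take the mmap
 * semaphore and return the *pos'th VMA in the rbtree
 */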
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct mm_struct *mm;
        struct rb_node *p;
        loff_t n = *pos;

        /* pin the task and mm whilst we play with them */
        priv->task = get_proc_task(priv->inode);
        if (!priv->task)
                return ERR_PTR(-ESRCH);

        mm = priv->mm;
        if (!mm || !mmget_not_zero(mm))
                return NULL;

        down_read(&mm->mmap_sem);
        /* start from the Nth VMA */
        for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
                if (n-- == 0)
                        return p;

        up_read(&mm->mmap_sem);
        mmput(mm);
        return NULL;
}

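/*
 * seq_file stop callback: drop the mmap semaphore and mm reference if
 * iteration handed out a VMA, then release the pinned task
 */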
static void m_stop(struct seq_file *m, void *_vml)
{
        struct proc_maps_private *priv = m->private;

        if (!IS_ERR_OR_NULL(_vml)) {
                up_read(&priv->mm->mmap_sem);
                mmput(priv->mm);
        }
        if (priv->task) {
                put_task_struct(priv->task);
                priv->task = NULL;
        }
}

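/*
 * seq_file next callback: advance to the next VMA in the rbtree
 */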
static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
        struct rb_node *p = _p;

        (*pos)++;
        return p ? rb_next(p) : NULL;
}

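/*
 * iteration over the VMA rbtree, with separate show routines for the
 * pid and tid views
 */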
static const struct seq_operations proc_pid_maps_ops = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_pid_map
};

static const struct seq_operations proc_tid_maps_ops = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_tid_map
};

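/*
 * open a maps file: allocate the seq_file private record and take a
 * reference on the target mm, subject to a ptrace access check
 */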
static int maps_open(struct inode *inode, struct file *file,
                     const struct seq_operations *ops)
{
        struct proc_maps_private *priv;

        priv = __seq_open_private(file, ops, sizeof(*priv));
        if (!priv)
                return -ENOMEM;

        priv->inode = inode;
        priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
        if (IS_ERR(priv->mm)) {
                int err = PTR_ERR(priv->mm);

                seq_release_private(inode, file);
                return err;
        }

        return 0;
}

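/*
 * release a maps file: drop the mm reference taken at open time and
 * free the seq_file private data
 */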
static int map_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;

        if (priv->mm)
                mmdrop(priv->mm);

        return seq_release_private(inode, file);
}

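/*
 * open callbacks wiring each maps file to the matching seq_operations
 */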
static int pid_maps_open(struct inode *inode, struct file *file)
{
        return maps_open(inode, file, &proc_pid_maps_ops);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
        return maps_open(inode, file, &proc_tid_maps_ops);
}

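/*
 * file operations for the /proc/<pid>/maps and /proc/<pid>/task/<tid>/maps
 * entries
 */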
const struct file_operations proc_pid_maps_operations = {
        .open           = pid_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = map_release,
};

const struct file_operations proc_tid_maps_operations = {
        .open           = tid_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = map_release,
};