linux/fs/proc/base.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/fs/proc/base.c
   4 *
   5 *  Copyright (C) 1991, 1992 Linus Torvalds
   6 *
   7 *  proc base directory handling functions
   8 *
   9 *  1999, Al Viro. Rewritten. Now it covers the whole per-process part.
   10 *  Instead of using magical inumbers to determine the kind of object,
   11 *  we allocate and fill in-core inodes upon lookup. They don't even
   12 *  go into icache. We cache the reference to task_struct upon lookup too.
   13 *  Eventually it should become a filesystem in its own right. We don't
   14 *  use the rest of procfs anymore.
  15 *
  16 *
  17 *  Changelog:
  18 *  17-Jan-2005
  19 *  Allan Bezerra
  20 *  Bruna Moreira <bruna.moreira@indt.org.br>
  21 *  Edjard Mota <edjard.mota@indt.org.br>
  22 *  Ilias Biris <ilias.biris@indt.org.br>
  23 *  Mauricio Lin <mauricio.lin@indt.org.br>
  24 *
  25 *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
  26 *
   27 *  A new process-specific entry (smaps) was included in /proc. It shows the
   28 *  size of rss for each memory area. The maps entry lacks information
   29 *  about physical memory size (rss) for each mapped file, i.e.,
   30 *  rss information for executables and library files.
   31 *  This additional information is useful for any tools that need to know
   32 *  about physical memory consumption for a process-specific library.
  33 *
  34 *  Changelog:
  35 *  21-Feb-2005
  36 *  Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
  37 *  Pud inclusion in the page table walking.
  38 *
  39 *  ChangeLog:
  40 *  10-Mar-2005
  41 *  10LE Instituto Nokia de Tecnologia - INdT:
   42 *  A better way to walk through the page table, as suggested by Hugh Dickins.
  43 *
  44 *  Simo Piiroinen <simo.piiroinen@nokia.com>:
  45 *  Smaps information related to shared, private, clean and dirty pages.
  46 *
  47 *  Paul Mundt <paul.mundt@nokia.com>:
  48 *  Overall revision about smaps.
  49 */
  50
  51#include <linux/uaccess.h>
  52
  53#include <linux/errno.h>
  54#include <linux/time.h>
  55#include <linux/proc_fs.h>
  56#include <linux/stat.h>
  57#include <linux/task_io_accounting_ops.h>
  58#include <linux/init.h>
  59#include <linux/capability.h>
  60#include <linux/file.h>
  61#include <linux/fdtable.h>
  62#include <linux/generic-radix-tree.h>
  63#include <linux/string.h>
  64#include <linux/seq_file.h>
  65#include <linux/namei.h>
  66#include <linux/mnt_namespace.h>
  67#include <linux/mm.h>
  68#include <linux/swap.h>
  69#include <linux/rcupdate.h>
  70#include <linux/kallsyms.h>
  71#include <linux/stacktrace.h>
  72#include <linux/resource.h>
  73#include <linux/module.h>
  74#include <linux/mount.h>
  75#include <linux/security.h>
  76#include <linux/ptrace.h>
  77#include <linux/tracehook.h>
  78#include <linux/printk.h>
  79#include <linux/cache.h>
  80#include <linux/cgroup.h>
  81#include <linux/cpuset.h>
  82#include <linux/audit.h>
  83#include <linux/poll.h>
  84#include <linux/nsproxy.h>
  85#include <linux/oom.h>
  86#include <linux/elf.h>
  87#include <linux/pid_namespace.h>
  88#include <linux/user_namespace.h>
  89#include <linux/fs_struct.h>
  90#include <linux/slab.h>
  91#include <linux/sched/autogroup.h>
  92#include <linux/sched/mm.h>
  93#include <linux/sched/coredump.h>
  94#include <linux/sched/debug.h>
  95#include <linux/sched/stat.h>
  96#include <linux/posix-timers.h>
  97#include <linux/time_namespace.h>
  98#include <linux/resctrl.h>
  99#include <trace/events/oom.h>
 100#include "internal.h"
 101#include "fd.h"
 102
 103#include "../../lib/kstrtox.h"
 104
 105/* NOTE:
 106 *      Implementing inode permission operations in /proc is almost
 107 *      certainly an error.  Permission checks need to happen during
  108 *      each system call, not at open time.  The reason is that most of
 109 *      what we wish to check for permissions in /proc varies at runtime.
 110 *
 111 *      The classic example of a problem is opening file descriptors
 112 *      in /proc for a task before it execs a suid executable.
 113 */
 114
 115static u8 nlink_tid __ro_after_init;
 116static u8 nlink_tgid __ro_after_init;
 117
 118struct pid_entry {
 119        const char *name;
 120        unsigned int len;
 121        umode_t mode;
 122        const struct inode_operations *iop;
 123        const struct file_operations *fop;
 124        union proc_op op;
 125};
 126
 127#define NOD(NAME, MODE, IOP, FOP, OP) {                 \
 128        .name = (NAME),                                 \
 129        .len  = sizeof(NAME) - 1,                       \
 130        .mode = MODE,                                   \
 131        .iop  = IOP,                                    \
 132        .fop  = FOP,                                    \
 133        .op   = OP,                                     \
 134}
 135
 136#define DIR(NAME, MODE, iops, fops)     \
 137        NOD(NAME, (S_IFDIR|(MODE)), &iops, &fops, {} )
 138#define LNK(NAME, get_link)                                     \
 139        NOD(NAME, (S_IFLNK|S_IRWXUGO),                          \
 140                &proc_pid_link_inode_operations, NULL,          \
 141                { .proc_get_link = get_link } )
 142#define REG(NAME, MODE, fops)                           \
 143        NOD(NAME, (S_IFREG|(MODE)), NULL, &fops, {})
 144#define ONE(NAME, MODE, show)                           \
 145        NOD(NAME, (S_IFREG|(MODE)),                     \
 146                NULL, &proc_single_file_operations,     \
 147                { .proc_show = show } )
 148#define ATTR(LSM, NAME, MODE)                           \
 149        NOD(NAME, (S_IFREG|(MODE)),                     \
 150                NULL, &proc_pid_attr_operations,        \
 151                { .lsm = LSM })
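
/*
 * For illustration (a sketch; the actual tables appear further down in this
 * file), per-pid directory entries are declared with these helpers, e.g.:
 *
 *	REG("cmdline",   S_IRUGO, proc_pid_cmdline_ops),
 *	ONE("oom_score", S_IRUGO, proc_oom_score),
 *	LNK("cwd",       proc_cwd_link),
 */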
 152
  153/*
  154 * Count the number of hardlinks for the pid_entry table: 2 for "." and the
  155 * link from the parent directory, plus one for each subdirectory entry.
  156 */
 157static unsigned int __init pid_entry_nlink(const struct pid_entry *entries,
 158        unsigned int n)
 159{
 160        unsigned int i;
 161        unsigned int count;
 162
 163        count = 2;
 164        for (i = 0; i < n; ++i) {
 165                if (S_ISDIR(entries[i].mode))
 166                        ++count;
 167        }
 168
 169        return count;
 170}
 171
 172static int get_task_root(struct task_struct *task, struct path *root)
 173{
 174        int result = -ENOENT;
 175
 176        task_lock(task);
 177        if (task->fs) {
 178                get_fs_root(task->fs, root);
 179                result = 0;
 180        }
 181        task_unlock(task);
 182        return result;
 183}
 184
 185static int proc_cwd_link(struct dentry *dentry, struct path *path)
 186{
 187        struct task_struct *task = get_proc_task(d_inode(dentry));
 188        int result = -ENOENT;
 189
 190        if (task) {
 191                task_lock(task);
 192                if (task->fs) {
 193                        get_fs_pwd(task->fs, path);
 194                        result = 0;
 195                }
 196                task_unlock(task);
 197                put_task_struct(task);
 198        }
 199        return result;
 200}
 201
 202static int proc_root_link(struct dentry *dentry, struct path *path)
 203{
 204        struct task_struct *task = get_proc_task(d_inode(dentry));
 205        int result = -ENOENT;
 206
 207        if (task) {
 208                result = get_task_root(task, path);
 209                put_task_struct(task);
 210        }
 211        return result;
 212}
 213
 214/*
 215 * If the user used setproctitle(), we just get the string from
 216 * user space at arg_start, and limit it to a maximum of one page.
 217 */
 218static ssize_t get_mm_proctitle(struct mm_struct *mm, char __user *buf,
 219                                size_t count, unsigned long pos,
 220                                unsigned long arg_start)
 221{
 222        char *page;
 223        int ret, got;
 224
 225        if (pos >= PAGE_SIZE)
 226                return 0;
 227
 228        page = (char *)__get_free_page(GFP_KERNEL);
 229        if (!page)
 230                return -ENOMEM;
 231
 232        ret = 0;
 233        got = access_remote_vm(mm, arg_start, page, PAGE_SIZE, FOLL_ANON);
 234        if (got > 0) {
 235                int len = strnlen(page, got);
 236
 237                /* Include the NUL character if it was found */
 238                if (len < got)
 239                        len++;
 240
 241                if (len > pos) {
 242                        len -= pos;
 243                        if (len > count)
 244                                len = count;
 245                        len -= copy_to_user(buf, page+pos, len);
 246                        if (!len)
 247                                len = -EFAULT;
 248                        ret = len;
 249                }
 250        }
 251        free_page((unsigned long)page);
 252        return ret;
 253}
 254
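/*
 * Read the target mm's command line into a user buffer.  The normal case is
 * bounded by [arg_start, arg_end); if the task has used setproctitle() the
 * string may extend into the environment area and is handled by
 * get_mm_proctitle() above.
 */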
 255static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
 256                              size_t count, loff_t *ppos)
 257{
 258        unsigned long arg_start, arg_end, env_start, env_end;
 259        unsigned long pos, len;
 260        char *page, c;
 261
  262        /* Check if the process spawned far enough to have a cmdline. */
 263        if (!mm->env_end)
 264                return 0;
 265
 266        spin_lock(&mm->arg_lock);
 267        arg_start = mm->arg_start;
 268        arg_end = mm->arg_end;
 269        env_start = mm->env_start;
 270        env_end = mm->env_end;
 271        spin_unlock(&mm->arg_lock);
 272
 273        if (arg_start >= arg_end)
 274                return 0;
 275
 276        /*
 277         * We allow setproctitle() to overwrite the argument
 278         * strings, and overflow past the original end. But
 279         * only when it overflows into the environment area.
 280         */
 281        if (env_start != arg_end || env_end < env_start)
 282                env_start = env_end = arg_end;
 283        len = env_end - arg_start;
 284
 285        /* We're not going to care if "*ppos" has high bits set */
 286        pos = *ppos;
 287        if (pos >= len)
 288                return 0;
 289        if (count > len - pos)
 290                count = len - pos;
 291        if (!count)
 292                return 0;
 293
 294        /*
 295         * Magical special case: if the argv[] end byte is not
 296         * zero, the user has overwritten it with setproctitle(3).
 297         *
 298         * Possible future enhancement: do this only once when
 299         * pos is 0, and set a flag in the 'struct file'.
 300         */
 301        if (access_remote_vm(mm, arg_end-1, &c, 1, FOLL_ANON) == 1 && c)
 302                return get_mm_proctitle(mm, buf, count, pos, arg_start);
 303
 304        /*
 305         * For the non-setproctitle() case we limit things strictly
 306         * to the [arg_start, arg_end[ range.
 307         */
 308        pos += arg_start;
 309        if (pos < arg_start || pos >= arg_end)
 310                return 0;
 311        if (count > arg_end - pos)
 312                count = arg_end - pos;
 313
 314        page = (char *)__get_free_page(GFP_KERNEL);
 315        if (!page)
 316                return -ENOMEM;
 317
 318        len = 0;
 319        while (count) {
 320                int got;
 321                size_t size = min_t(size_t, PAGE_SIZE, count);
 322
 323                got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
 324                if (got <= 0)
 325                        break;
 326                got -= copy_to_user(buf, page, got);
 327                if (unlikely(!got)) {
 328                        if (!len)
 329                                len = -EFAULT;
 330                        break;
 331                }
 332                pos += got;
 333                buf += got;
 334                len += got;
 335                count -= got;
 336        }
 337
 338        free_page((unsigned long)page);
 339        return len;
 340}
 341
 342static ssize_t get_task_cmdline(struct task_struct *tsk, char __user *buf,
 343                                size_t count, loff_t *pos)
 344{
 345        struct mm_struct *mm;
 346        ssize_t ret;
 347
 348        mm = get_task_mm(tsk);
 349        if (!mm)
 350                return 0;
 351
 352        ret = get_mm_cmdline(mm, buf, count, pos);
 353        mmput(mm);
 354        return ret;
 355}
 356
 357static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
 358                                     size_t count, loff_t *pos)
 359{
 360        struct task_struct *tsk;
 361        ssize_t ret;
 362
 363        BUG_ON(*pos < 0);
 364
 365        tsk = get_proc_task(file_inode(file));
 366        if (!tsk)
 367                return -ESRCH;
 368        ret = get_task_cmdline(tsk, buf, count, pos);
 369        put_task_struct(tsk);
 370        if (ret > 0)
 371                *pos += ret;
 372        return ret;
 373}
 374
 375static const struct file_operations proc_pid_cmdline_ops = {
 376        .read   = proc_pid_cmdline_read,
 377        .llseek = generic_file_llseek,
 378};
 379
 380#ifdef CONFIG_KALLSYMS
  381/*
  382 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
  383 * Prints the resolved symbol name, or "0" if it cannot be resolved.
  384 */
 385static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
 386                          struct pid *pid, struct task_struct *task)
 387{
 388        unsigned long wchan;
 389        char symname[KSYM_NAME_LEN];
 390
 391        if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 392                goto print0;
 393
 394        wchan = get_wchan(task);
 395        if (wchan && !lookup_symbol_name(wchan, symname)) {
 396                seq_puts(m, symname);
 397                return 0;
 398        }
 399
 400print0:
 401        seq_putc(m, '0');
 402        return 0;
 403}
 404#endif /* CONFIG_KALLSYMS */
 405
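/*
 * Take exec_update_mutex to serialize against exec() and verify that the
 * caller may ptrace-attach to the task before its state is exposed; callers
 * pair this with unlock_trace() below.
 */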
 406static int lock_trace(struct task_struct *task)
 407{
 408        int err = mutex_lock_killable(&task->signal->exec_update_mutex);
 409        if (err)
 410                return err;
 411        if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
 412                mutex_unlock(&task->signal->exec_update_mutex);
 413                return -EPERM;
 414        }
 415        return 0;
 416}
 417
 418static void unlock_trace(struct task_struct *task)
 419{
 420        mutex_unlock(&task->signal->exec_update_mutex);
 421}
 422
 423#ifdef CONFIG_STACKTRACE
 424
 425#define MAX_STACK_TRACE_DEPTH   64
 426
 427static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
 428                          struct pid *pid, struct task_struct *task)
 429{
 430        unsigned long *entries;
 431        int err;
 432
 433        /*
 434         * The ability to racily run the kernel stack unwinder on a running task
 435         * and then observe the unwinder output is scary; while it is useful for
 436         * debugging kernel issues, it can also allow an attacker to leak kernel
 437         * stack contents.
 438         * Doing this in a manner that is at least safe from races would require
 439         * some work to ensure that the remote task can not be scheduled; and
 440         * even then, this would still expose the unwinder as local attack
 441         * surface.
 442         * Therefore, this interface is restricted to root.
 443         */
 444        if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
 445                return -EACCES;
 446
 447        entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries),
 448                                GFP_KERNEL);
 449        if (!entries)
 450                return -ENOMEM;
 451
 452        err = lock_trace(task);
 453        if (!err) {
 454                unsigned int i, nr_entries;
 455
 456                nr_entries = stack_trace_save_tsk(task, entries,
 457                                                  MAX_STACK_TRACE_DEPTH, 0);
 458
 459                for (i = 0; i < nr_entries; i++) {
 460                        seq_printf(m, "[<0>] %pB\n", (void *)entries[i]);
 461                }
 462
 463                unlock_trace(task);
 464        }
 465        kfree(entries);
 466
 467        return err;
 468}
 469#endif
 470
 471#ifdef CONFIG_SCHED_INFO
  472/*
  473 * Provides /proc/PID/schedstat: cpu time, runqueue wait time, timeslice count.
  474 */
 475static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
 476                              struct pid *pid, struct task_struct *task)
 477{
 478        if (unlikely(!sched_info_on()))
 479                seq_puts(m, "0 0 0\n");
 480        else
 481                seq_printf(m, "%llu %llu %lu\n",
 482                   (unsigned long long)task->se.sum_exec_runtime,
 483                   (unsigned long long)task->sched_info.run_delay,
 484                   task->sched_info.pcount);
 485
 486        return 0;
 487}
 488#endif
 489
 490#ifdef CONFIG_LATENCYTOP
 491static int lstats_show_proc(struct seq_file *m, void *v)
 492{
 493        int i;
 494        struct inode *inode = m->private;
 495        struct task_struct *task = get_proc_task(inode);
 496
 497        if (!task)
 498                return -ESRCH;
 499        seq_puts(m, "Latency Top version : v0.1\n");
 500        for (i = 0; i < LT_SAVECOUNT; i++) {
 501                struct latency_record *lr = &task->latency_record[i];
 502                if (lr->backtrace[0]) {
 503                        int q;
 504                        seq_printf(m, "%i %li %li",
 505                                   lr->count, lr->time, lr->max);
 506                        for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
 507                                unsigned long bt = lr->backtrace[q];
 508
 509                                if (!bt)
 510                                        break;
 511                                seq_printf(m, " %ps", (void *)bt);
 512                        }
 513                        seq_putc(m, '\n');
 514                }
 515
 516        }
 517        put_task_struct(task);
 518        return 0;
 519}
 520
 521static int lstats_open(struct inode *inode, struct file *file)
 522{
 523        return single_open(file, lstats_show_proc, inode);
 524}
 525
 526static ssize_t lstats_write(struct file *file, const char __user *buf,
 527                            size_t count, loff_t *offs)
 528{
 529        struct task_struct *task = get_proc_task(file_inode(file));
 530
 531        if (!task)
 532                return -ESRCH;
 533        clear_tsk_latency_tracing(task);
 534        put_task_struct(task);
 535
 536        return count;
 537}
 538
 539static const struct file_operations proc_lstats_operations = {
 540        .open           = lstats_open,
 541        .read           = seq_read,
 542        .write          = lstats_write,
 543        .llseek         = seq_lseek,
 544        .release        = single_release,
 545};
 546
 547#endif
 548
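/*
 * /proc/<pid>/oom_score: the task's oom_badness() value normalised against
 * the total number of allocatable pages (RAM plus swap).
 */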
 549static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
 550                          struct pid *pid, struct task_struct *task)
 551{
 552        unsigned long totalpages = totalram_pages() + total_swap_pages;
 553        unsigned long points = 0;
 554
 555        points = oom_badness(task, totalpages) * 1000 / totalpages;
 556        seq_printf(m, "%lu\n", points);
 557
 558        return 0;
 559}
 560
 561struct limit_names {
 562        const char *name;
 563        const char *unit;
 564};
 565
 566static const struct limit_names lnames[RLIM_NLIMITS] = {
 567        [RLIMIT_CPU] = {"Max cpu time", "seconds"},
 568        [RLIMIT_FSIZE] = {"Max file size", "bytes"},
 569        [RLIMIT_DATA] = {"Max data size", "bytes"},
 570        [RLIMIT_STACK] = {"Max stack size", "bytes"},
 571        [RLIMIT_CORE] = {"Max core file size", "bytes"},
 572        [RLIMIT_RSS] = {"Max resident set", "bytes"},
 573        [RLIMIT_NPROC] = {"Max processes", "processes"},
 574        [RLIMIT_NOFILE] = {"Max open files", "files"},
 575        [RLIMIT_MEMLOCK] = {"Max locked memory", "bytes"},
 576        [RLIMIT_AS] = {"Max address space", "bytes"},
 577        [RLIMIT_LOCKS] = {"Max file locks", "locks"},
 578        [RLIMIT_SIGPENDING] = {"Max pending signals", "signals"},
 579        [RLIMIT_MSGQUEUE] = {"Max msgqueue size", "bytes"},
 580        [RLIMIT_NICE] = {"Max nice priority", NULL},
 581        [RLIMIT_RTPRIO] = {"Max realtime priority", NULL},
 582        [RLIMIT_RTTIME] = {"Max realtime timeout", "us"},
 583};
 584
 585/* Display limits for a process */
 586static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
 587                           struct pid *pid, struct task_struct *task)
 588{
 589        unsigned int i;
 590        unsigned long flags;
 591
 592        struct rlimit rlim[RLIM_NLIMITS];
 593
 594        if (!lock_task_sighand(task, &flags))
 595                return 0;
 596        memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS);
 597        unlock_task_sighand(task, &flags);
 598
 599        /*
 600         * print the file header
 601         */
 602        seq_puts(m, "Limit                     "
 603                "Soft Limit           "
 604                "Hard Limit           "
 605                "Units     \n");
 606
 607        for (i = 0; i < RLIM_NLIMITS; i++) {
 608                if (rlim[i].rlim_cur == RLIM_INFINITY)
 609                        seq_printf(m, "%-25s %-20s ",
 610                                   lnames[i].name, "unlimited");
 611                else
 612                        seq_printf(m, "%-25s %-20lu ",
 613                                   lnames[i].name, rlim[i].rlim_cur);
 614
 615                if (rlim[i].rlim_max == RLIM_INFINITY)
 616                        seq_printf(m, "%-20s ", "unlimited");
 617                else
 618                        seq_printf(m, "%-20lu ", rlim[i].rlim_max);
 619
 620                if (lnames[i].unit)
 621                        seq_printf(m, "%-10s\n", lnames[i].unit);
 622                else
 623                        seq_putc(m, '\n');
 624        }
 625
 626        return 0;
 627}
 628
 629#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 630static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
 631                            struct pid *pid, struct task_struct *task)
 632{
 633        struct syscall_info info;
 634        u64 *args = &info.data.args[0];
 635        int res;
 636
 637        res = lock_trace(task);
 638        if (res)
 639                return res;
 640
 641        if (task_current_syscall(task, &info))
 642                seq_puts(m, "running\n");
 643        else if (info.data.nr < 0)
 644                seq_printf(m, "%d 0x%llx 0x%llx\n",
 645                           info.data.nr, info.sp, info.data.instruction_pointer);
 646        else
 647                seq_printf(m,
 648                       "%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
 649                       info.data.nr,
 650                       args[0], args[1], args[2], args[3], args[4], args[5],
 651                       info.sp, info.data.instruction_pointer);
 652        unlock_trace(task);
 653
 654        return 0;
 655}
 656#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
 657
 658/************************************************************************/
 659/*                       Here the fs part begins                        */
 660/************************************************************************/
 661
 662/* permission checks */
 663static int proc_fd_access_allowed(struct inode *inode)
 664{
 665        struct task_struct *task;
 666        int allowed = 0;
  667        /* Allow access to a task's file descriptors if it is us or if
  668         * we may use ptrace to attach to the process and find out that
  669         * information.
  670         */
 671        task = get_proc_task(inode);
 672        if (task) {
 673                allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
 674                put_task_struct(task);
 675        }
 676        return allowed;
 677}
 678
 679int proc_setattr(struct dentry *dentry, struct iattr *attr)
 680{
 681        int error;
 682        struct inode *inode = d_inode(dentry);
 683
 684        if (attr->ia_valid & ATTR_MODE)
 685                return -EPERM;
 686
 687        error = setattr_prepare(dentry, attr);
 688        if (error)
 689                return error;
 690
 691        setattr_copy(inode, attr);
 692        mark_inode_dirty(inode);
 693        return 0;
 694}
 695
  696/*
  697 * May the current process learn the task's sched/cmdline info (for
  698 * hide_pid_min=1) or euid/egid (for hide_pid_min=2)?
  699 */
 700static bool has_pid_permissions(struct pid_namespace *pid,
 701                                 struct task_struct *task,
 702                                 int hide_pid_min)
 703{
 704        if (pid->hide_pid < hide_pid_min)
 705                return true;
 706        if (in_group_p(pid->pid_gid))
 707                return true;
 708        return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
 709}
 710
 711
 712static int proc_pid_permission(struct inode *inode, int mask)
 713{
 714        struct pid_namespace *pid = proc_pid_ns(inode);
 715        struct task_struct *task;
 716        bool has_perms;
 717
 718        task = get_proc_task(inode);
 719        if (!task)
 720                return -ESRCH;
 721        has_perms = has_pid_permissions(pid, task, HIDEPID_NO_ACCESS);
 722        put_task_struct(task);
 723
 724        if (!has_perms) {
 725                if (pid->hide_pid == HIDEPID_INVISIBLE) {
 726                        /*
 727                         * Let's make getdents(), stat(), and open()
 728                         * consistent with each other.  If a process
 729                         * may not stat() a file, it shouldn't be seen
 730                         * in procfs at all.
 731                         */
 732                        return -ENOENT;
 733                }
 734
 735                return -EPERM;
 736        }
 737        return generic_permission(inode, mask);
 738}
 739
 740
 741
 742static const struct inode_operations proc_def_inode_operations = {
 743        .setattr        = proc_setattr,
 744};
 745
 746static int proc_single_show(struct seq_file *m, void *v)
 747{
 748        struct inode *inode = m->private;
 749        struct pid_namespace *ns = proc_pid_ns(inode);
 750        struct pid *pid = proc_pid(inode);
 751        struct task_struct *task;
 752        int ret;
 753
 754        task = get_pid_task(pid, PIDTYPE_PID);
 755        if (!task)
 756                return -ESRCH;
 757
 758        ret = PROC_I(inode)->op.proc_show(m, ns, pid, task);
 759
 760        put_task_struct(task);
 761        return ret;
 762}
 763
 764static int proc_single_open(struct inode *inode, struct file *filp)
 765{
 766        return single_open(filp, proc_single_show, inode);
 767}
 768
 769static const struct file_operations proc_single_file_operations = {
 770        .open           = proc_single_open,
 771        .read           = seq_read,
 772        .llseek         = seq_lseek,
 773        .release        = single_release,
 774};
 775
 776
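/*
 * Look up the target task's mm for a /proc file, with the ptrace permission
 * check done by mm_access().  On success the mm is pinned with mmgrab() (so
 * the struct itself cannot be freed) but not mmget(), so the address space
 * may still be torn down while the file is open.
 */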
 777struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
 778{
 779        struct task_struct *task = get_proc_task(inode);
 780        struct mm_struct *mm = ERR_PTR(-ESRCH);
 781
 782        if (task) {
 783                mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
 784                put_task_struct(task);
 785
 786                if (!IS_ERR_OR_NULL(mm)) {
 787                        /* ensure this mm_struct can't be freed */
 788                        mmgrab(mm);
 789                        /* but do not pin its memory */
 790                        mmput(mm);
 791                }
 792        }
 793
 794        return mm;
 795}
 796
 797static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
 798{
 799        struct mm_struct *mm = proc_mem_open(inode, mode);
 800
 801        if (IS_ERR(mm))
 802                return PTR_ERR(mm);
 803
 804        file->private_data = mm;
 805        return 0;
 806}
 807
 808static int mem_open(struct inode *inode, struct file *file)
 809{
 810        int ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
 811
 812        /* OK to pass negative loff_t, we can catch out-of-range */
 813        file->f_mode |= FMODE_UNSIGNED_OFFSET;
 814
 815        return ret;
 816}
 817
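/*
 * Common worker for /proc/<pid>/mem: copy up to @count bytes between the
 * user buffer and the target mm at offset *ppos, one page at a time, using
 * access_remote_vm() with FOLL_FORCE (plus FOLL_WRITE for writes).
 */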
 818static ssize_t mem_rw(struct file *file, char __user *buf,
 819                        size_t count, loff_t *ppos, int write)
 820{
 821        struct mm_struct *mm = file->private_data;
 822        unsigned long addr = *ppos;
 823        ssize_t copied;
 824        char *page;
 825        unsigned int flags;
 826
 827        if (!mm)
 828                return 0;
 829
 830        page = (char *)__get_free_page(GFP_KERNEL);
 831        if (!page)
 832                return -ENOMEM;
 833
 834        copied = 0;
 835        if (!mmget_not_zero(mm))
 836                goto free;
 837
 838        flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
 839
 840        while (count > 0) {
 841                int this_len = min_t(int, count, PAGE_SIZE);
 842
 843                if (write && copy_from_user(page, buf, this_len)) {
 844                        copied = -EFAULT;
 845                        break;
 846                }
 847
 848                this_len = access_remote_vm(mm, addr, page, this_len, flags);
 849                if (!this_len) {
 850                        if (!copied)
 851                                copied = -EIO;
 852                        break;
 853                }
 854
 855                if (!write && copy_to_user(buf, page, this_len)) {
 856                        copied = -EFAULT;
 857                        break;
 858                }
 859
 860                buf += this_len;
 861                addr += this_len;
 862                copied += this_len;
 863                count -= this_len;
 864        }
 865        *ppos = addr;
 866
 867        mmput(mm);
 868free:
 869        free_page((unsigned long) page);
 870        return copied;
 871}
 872
 873static ssize_t mem_read(struct file *file, char __user *buf,
 874                        size_t count, loff_t *ppos)
 875{
 876        return mem_rw(file, buf, count, ppos, 0);
 877}
 878
 879static ssize_t mem_write(struct file *file, const char __user *buf,
 880                         size_t count, loff_t *ppos)
 881{
 882        return mem_rw(file, (char __user*)buf, count, ppos, 1);
 883}
 884
 885loff_t mem_lseek(struct file *file, loff_t offset, int orig)
 886{
 887        switch (orig) {
 888        case 0:
 889                file->f_pos = offset;
 890                break;
 891        case 1:
 892                file->f_pos += offset;
 893                break;
 894        default:
 895                return -EINVAL;
 896        }
 897        force_successful_syscall_return();
 898        return file->f_pos;
 899}
 900
 901static int mem_release(struct inode *inode, struct file *file)
 902{
 903        struct mm_struct *mm = file->private_data;
 904        if (mm)
 905                mmdrop(mm);
 906        return 0;
 907}
 908
 909static const struct file_operations proc_mem_operations = {
 910        .llseek         = mem_lseek,
 911        .read           = mem_read,
 912        .write          = mem_write,
 913        .open           = mem_open,
 914        .release        = mem_release,
 915};
 916
 917static int environ_open(struct inode *inode, struct file *file)
 918{
 919        return __mem_open(inode, file, PTRACE_MODE_READ);
 920}
 921
 922static ssize_t environ_read(struct file *file, char __user *buf,
 923                        size_t count, loff_t *ppos)
 924{
 925        char *page;
 926        unsigned long src = *ppos;
 927        int ret = 0;
 928        struct mm_struct *mm = file->private_data;
 929        unsigned long env_start, env_end;
 930
 931        /* Ensure the process spawned far enough to have an environment. */
 932        if (!mm || !mm->env_end)
 933                return 0;
 934
 935        page = (char *)__get_free_page(GFP_KERNEL);
 936        if (!page)
 937                return -ENOMEM;
 938
 939        ret = 0;
 940        if (!mmget_not_zero(mm))
 941                goto free;
 942
 943        spin_lock(&mm->arg_lock);
 944        env_start = mm->env_start;
 945        env_end = mm->env_end;
 946        spin_unlock(&mm->arg_lock);
 947
 948        while (count > 0) {
 949                size_t this_len, max_len;
 950                int retval;
 951
 952                if (src >= (env_end - env_start))
 953                        break;
 954
 955                this_len = env_end - (env_start + src);
 956
 957                max_len = min_t(size_t, PAGE_SIZE, count);
 958                this_len = min(max_len, this_len);
 959
 960                retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
 961
 962                if (retval <= 0) {
 963                        ret = retval;
 964                        break;
 965                }
 966
 967                if (copy_to_user(buf, page, retval)) {
 968                        ret = -EFAULT;
 969                        break;
 970                }
 971
 972                ret += retval;
 973                src += retval;
 974                buf += retval;
 975                count -= retval;
 976        }
 977        *ppos = src;
 978        mmput(mm);
 979
 980free:
 981        free_page((unsigned long) page);
 982        return ret;
 983}
 984
 985static const struct file_operations proc_environ_operations = {
 986        .open           = environ_open,
 987        .read           = environ_read,
 988        .llseek         = generic_file_llseek,
 989        .release        = mem_release,
 990};
 991
 992static int auxv_open(struct inode *inode, struct file *file)
 993{
 994        return __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS);
 995}
 996
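/*
 * mm->saved_auxv is an array of (type, value) pairs captured at execve()
 * time and terminated by an AT_NULL entry, so scan it two words at a time
 * until the terminator before copying it out.
 */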
 997static ssize_t auxv_read(struct file *file, char __user *buf,
 998                        size_t count, loff_t *ppos)
 999{
1000        struct mm_struct *mm = file->private_data;
1001        unsigned int nwords = 0;
1002
1003        if (!mm)
1004                return 0;
1005        do {
1006                nwords += 2;
1007        } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
1008        return simple_read_from_buffer(buf, count, ppos, mm->saved_auxv,
1009                                       nwords * sizeof(mm->saved_auxv[0]));
1010}
1011
1012static const struct file_operations proc_auxv_operations = {
1013        .open           = auxv_open,
1014        .read           = auxv_read,
1015        .llseek         = generic_file_llseek,
1016        .release        = mem_release,
1017};
1018
1019static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
1020                            loff_t *ppos)
1021{
1022        struct task_struct *task = get_proc_task(file_inode(file));
1023        char buffer[PROC_NUMBUF];
1024        int oom_adj = OOM_ADJUST_MIN;
1025        size_t len;
1026
1027        if (!task)
1028                return -ESRCH;
1029        if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
1030                oom_adj = OOM_ADJUST_MAX;
1031        else
1032                oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
1033                          OOM_SCORE_ADJ_MAX;
1034        put_task_struct(task);
1035        len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj);
1036        return simple_read_from_buffer(buf, count, ppos, buffer, len);
1037}
1038
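/*
 * Apply a new oom_score_adj to the target task.  If its mm is shared with
 * other processes (CLONE_VM without a pending vfork), propagate the value to
 * them as well so the group is treated consistently by the OOM killer.
 */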
1039static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
1040{
1041        static DEFINE_MUTEX(oom_adj_mutex);
1042        struct mm_struct *mm = NULL;
1043        struct task_struct *task;
1044        int err = 0;
1045
1046        task = get_proc_task(file_inode(file));
1047        if (!task)
1048                return -ESRCH;
1049
1050        mutex_lock(&oom_adj_mutex);
1051        if (legacy) {
1052                if (oom_adj < task->signal->oom_score_adj &&
1053                                !capable(CAP_SYS_RESOURCE)) {
1054                        err = -EACCES;
1055                        goto err_unlock;
1056                }
1057                /*
 1058                 * /proc/pid/oom_adj is provided for legacy purposes; ask users to use
1059                 * /proc/pid/oom_score_adj instead.
1060                 */
1061                pr_warn_once("%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
1062                          current->comm, task_pid_nr(current), task_pid_nr(task),
1063                          task_pid_nr(task));
1064        } else {
1065                if ((short)oom_adj < task->signal->oom_score_adj_min &&
1066                                !capable(CAP_SYS_RESOURCE)) {
1067                        err = -EACCES;
1068                        goto err_unlock;
1069                }
1070        }
1071
 1072        /*
 1073         * Make sure we will check other processes sharing the mm if this is
 1074         * not vfork, which wants its own oom_score_adj.
 1075         * Pin the mm so it doesn't go away and get reused after task_unlock.
 1076         */
1077        if (!task->vfork_done) {
1078                struct task_struct *p = find_lock_task_mm(task);
1079
1080                if (p) {
1081                        if (atomic_read(&p->mm->mm_users) > 1) {
1082                                mm = p->mm;
1083                                mmgrab(mm);
1084                        }
1085                        task_unlock(p);
1086                }
1087        }
1088
1089        task->signal->oom_score_adj = oom_adj;
1090        if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
1091                task->signal->oom_score_adj_min = (short)oom_adj;
1092        trace_oom_score_adj_update(task);
1093
1094        if (mm) {
1095                struct task_struct *p;
1096
1097                rcu_read_lock();
1098                for_each_process(p) {
1099                        if (same_thread_group(task, p))
1100                                continue;
1101
1102                        /* do not touch kernel threads or the global init */
1103                        if (p->flags & PF_KTHREAD || is_global_init(p))
1104                                continue;
1105
1106                        task_lock(p);
1107                        if (!p->vfork_done && process_shares_mm(p, mm)) {
1108                                p->signal->oom_score_adj = oom_adj;
1109                                if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
1110                                        p->signal->oom_score_adj_min = (short)oom_adj;
1111                        }
1112                        task_unlock(p);
1113                }
1114                rcu_read_unlock();
1115                mmdrop(mm);
1116        }
1117err_unlock:
1118        mutex_unlock(&oom_adj_mutex);
1119        put_task_struct(task);
1120        return err;
1121}
1122
1123/*
1124 * /proc/pid/oom_adj exists solely for backwards compatibility with previous
1125 * kernels.  The effective policy is defined by oom_score_adj, which has a
1126 * different scale: oom_adj grew exponentially and oom_score_adj grows linearly.
1127 * Values written to oom_adj are simply mapped linearly to oom_score_adj.
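 * For example, assuming the mainline constants OOM_DISABLE == -17 and
 * OOM_SCORE_ADJ_MAX == 1000, writing oom_adj = 8 stores
 * oom_score_adj = 8 * 1000 / 17 = 470.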
1128 * Processes that become oom disabled via oom_adj will still be oom disabled
1129 * with this implementation.
1130 *
1131 * oom_adj cannot be removed since existing userspace binaries use it.
1132 */
1133static ssize_t oom_adj_write(struct file *file, const char __user *buf,
1134                             size_t count, loff_t *ppos)
1135{
1136        char buffer[PROC_NUMBUF];
1137        int oom_adj;
1138        int err;
1139
1140        memset(buffer, 0, sizeof(buffer));
1141        if (count > sizeof(buffer) - 1)
1142                count = sizeof(buffer) - 1;
1143        if (copy_from_user(buffer, buf, count)) {
1144                err = -EFAULT;
1145                goto out;
1146        }
1147
1148        err = kstrtoint(strstrip(buffer), 0, &oom_adj);
1149        if (err)
1150                goto out;
1151        if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) &&
1152             oom_adj != OOM_DISABLE) {
1153                err = -EINVAL;
1154                goto out;
1155        }
1156
1157        /*
1158         * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
1159         * value is always attainable.
1160         */
1161        if (oom_adj == OOM_ADJUST_MAX)
1162                oom_adj = OOM_SCORE_ADJ_MAX;
1163        else
1164                oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
1165
1166        err = __set_oom_adj(file, oom_adj, true);
1167out:
1168        return err < 0 ? err : count;
1169}
1170
1171static const struct file_operations proc_oom_adj_operations = {
1172        .read           = oom_adj_read,
1173        .write          = oom_adj_write,
1174        .llseek         = generic_file_llseek,
1175};
1176
1177static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
1178                                        size_t count, loff_t *ppos)
1179{
1180        struct task_struct *task = get_proc_task(file_inode(file));
1181        char buffer[PROC_NUMBUF];
1182        short oom_score_adj = OOM_SCORE_ADJ_MIN;
1183        size_t len;
1184
1185        if (!task)
1186                return -ESRCH;
1187        oom_score_adj = task->signal->oom_score_adj;
1188        put_task_struct(task);
1189        len = snprintf(buffer, sizeof(buffer), "%hd\n", oom_score_adj);
1190        return simple_read_from_buffer(buf, count, ppos, buffer, len);
1191}
1192
1193static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
1194                                        size_t count, loff_t *ppos)
1195{
1196        char buffer[PROC_NUMBUF];
1197        int oom_score_adj;
1198        int err;
1199
1200        memset(buffer, 0, sizeof(buffer));
1201        if (count > sizeof(buffer) - 1)
1202                count = sizeof(buffer) - 1;
1203        if (copy_from_user(buffer, buf, count)) {
1204                err = -EFAULT;
1205                goto out;
1206        }
1207
1208        err = kstrtoint(strstrip(buffer), 0, &oom_score_adj);
1209        if (err)
1210                goto out;
1211        if (oom_score_adj < OOM_SCORE_ADJ_MIN ||
1212                        oom_score_adj > OOM_SCORE_ADJ_MAX) {
1213                err = -EINVAL;
1214                goto out;
1215        }
1216
1217        err = __set_oom_adj(file, oom_score_adj, false);
1218out:
1219        return err < 0 ? err : count;
1220}
1221
1222static const struct file_operations proc_oom_score_adj_operations = {
1223        .read           = oom_score_adj_read,
1224        .write          = oom_score_adj_write,
1225        .llseek         = default_llseek,
1226};
1227
1228#ifdef CONFIG_AUDIT
1229#define TMPBUFLEN 11
1230static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
1231                                  size_t count, loff_t *ppos)
1232{
1233        struct inode * inode = file_inode(file);
1234        struct task_struct *task = get_proc_task(inode);
1235        ssize_t length;
1236        char tmpbuf[TMPBUFLEN];
1237
1238        if (!task)
1239                return -ESRCH;
1240        length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
1241                           from_kuid(file->f_cred->user_ns,
1242                                     audit_get_loginuid(task)));
1243        put_task_struct(task);
1244        return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
1245}
1246
1247static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
1248                                   size_t count, loff_t *ppos)
1249{
1250        struct inode * inode = file_inode(file);
1251        uid_t loginuid;
1252        kuid_t kloginuid;
1253        int rv;
1254
1255        rcu_read_lock();
1256        if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
1257                rcu_read_unlock();
1258                return -EPERM;
1259        }
1260        rcu_read_unlock();
1261
1262        if (*ppos != 0) {
1263                /* No partial writes. */
1264                return -EINVAL;
1265        }
1266
1267        rv = kstrtou32_from_user(buf, count, 10, &loginuid);
1268        if (rv < 0)
1269                return rv;
1270
 1271        /* is userspace trying to explicitly UNSET the loginuid? */
1272        if (loginuid == AUDIT_UID_UNSET) {
1273                kloginuid = INVALID_UID;
1274        } else {
1275                kloginuid = make_kuid(file->f_cred->user_ns, loginuid);
1276                if (!uid_valid(kloginuid))
1277                        return -EINVAL;
1278        }
1279
1280        rv = audit_set_loginuid(kloginuid);
1281        if (rv < 0)
1282                return rv;
1283        return count;
1284}
1285
1286static const struct file_operations proc_loginuid_operations = {
1287        .read           = proc_loginuid_read,
1288        .write          = proc_loginuid_write,
1289        .llseek         = generic_file_llseek,
1290};
1291
1292static ssize_t proc_sessionid_read(struct file * file, char __user * buf,
1293                                  size_t count, loff_t *ppos)
1294{
1295        struct inode * inode = file_inode(file);
1296        struct task_struct *task = get_proc_task(inode);
1297        ssize_t length;
1298        char tmpbuf[TMPBUFLEN];
1299
1300        if (!task)
1301                return -ESRCH;
1302        length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
1303                                audit_get_sessionid(task));
1304        put_task_struct(task);
1305        return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
1306}
1307
1308static const struct file_operations proc_sessionid_operations = {
1309        .read           = proc_sessionid_read,
1310        .llseek         = generic_file_llseek,
1311};
1312#endif
1313
1314#ifdef CONFIG_FAULT_INJECTION
1315static ssize_t proc_fault_inject_read(struct file * file, char __user * buf,
1316                                      size_t count, loff_t *ppos)
1317{
1318        struct task_struct *task = get_proc_task(file_inode(file));
1319        char buffer[PROC_NUMBUF];
1320        size_t len;
1321        int make_it_fail;
1322
1323        if (!task)
1324                return -ESRCH;
1325        make_it_fail = task->make_it_fail;
1326        put_task_struct(task);
1327
1328        len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail);
1329
1330        return simple_read_from_buffer(buf, count, ppos, buffer, len);
1331}
1332
1333static ssize_t proc_fault_inject_write(struct file * file,
1334                        const char __user * buf, size_t count, loff_t *ppos)
1335{
1336        struct task_struct *task;
1337        char buffer[PROC_NUMBUF];
1338        int make_it_fail;
1339        int rv;
1340
1341        if (!capable(CAP_SYS_RESOURCE))
1342                return -EPERM;
1343        memset(buffer, 0, sizeof(buffer));
1344        if (count > sizeof(buffer) - 1)
1345                count = sizeof(buffer) - 1;
1346        if (copy_from_user(buffer, buf, count))
1347                return -EFAULT;
1348        rv = kstrtoint(strstrip(buffer), 0, &make_it_fail);
1349        if (rv < 0)
1350                return rv;
1351        if (make_it_fail < 0 || make_it_fail > 1)
1352                return -EINVAL;
1353
1354        task = get_proc_task(file_inode(file));
1355        if (!task)
1356                return -ESRCH;
1357        task->make_it_fail = make_it_fail;
1358        put_task_struct(task);
1359
1360        return count;
1361}
1362
1363static const struct file_operations proc_fault_inject_operations = {
1364        .read           = proc_fault_inject_read,
1365        .write          = proc_fault_inject_write,
1366        .llseek         = generic_file_llseek,
1367};
1368
1369static ssize_t proc_fail_nth_write(struct file *file, const char __user *buf,
1370                                   size_t count, loff_t *ppos)
1371{
1372        struct task_struct *task;
1373        int err;
1374        unsigned int n;
1375
1376        err = kstrtouint_from_user(buf, count, 0, &n);
1377        if (err)
1378                return err;
1379
1380        task = get_proc_task(file_inode(file));
1381        if (!task)
1382                return -ESRCH;
1383        task->fail_nth = n;
1384        put_task_struct(task);
1385
1386        return count;
1387}
1388
1389static ssize_t proc_fail_nth_read(struct file *file, char __user *buf,
1390                                  size_t count, loff_t *ppos)
1391{
1392        struct task_struct *task;
1393        char numbuf[PROC_NUMBUF];
1394        ssize_t len;
1395
1396        task = get_proc_task(file_inode(file));
1397        if (!task)
1398                return -ESRCH;
1399        len = snprintf(numbuf, sizeof(numbuf), "%u\n", task->fail_nth);
1400        put_task_struct(task);
1401        return simple_read_from_buffer(buf, count, ppos, numbuf, len);
1402}
1403
1404static const struct file_operations proc_fail_nth_operations = {
1405        .read           = proc_fail_nth_read,
1406        .write          = proc_fail_nth_write,
1407};
1408#endif
1409
1410
1411#ifdef CONFIG_SCHED_DEBUG
1412/*
1413 * Print out various scheduling related per-task fields:
1414 */
1415static int sched_show(struct seq_file *m, void *v)
1416{
1417        struct inode *inode = m->private;
1418        struct pid_namespace *ns = proc_pid_ns(inode);
1419        struct task_struct *p;
1420
1421        p = get_proc_task(inode);
1422        if (!p)
1423                return -ESRCH;
1424        proc_sched_show_task(p, ns, m);
1425
1426        put_task_struct(p);
1427
1428        return 0;
1429}
1430
1431static ssize_t
1432sched_write(struct file *file, const char __user *buf,
1433            size_t count, loff_t *offset)
1434{
1435        struct inode *inode = file_inode(file);
1436        struct task_struct *p;
1437
1438        p = get_proc_task(inode);
1439        if (!p)
1440                return -ESRCH;
1441        proc_sched_set_task(p);
1442
1443        put_task_struct(p);
1444
1445        return count;
1446}
1447
1448static int sched_open(struct inode *inode, struct file *filp)
1449{
1450        return single_open(filp, sched_show, inode);
1451}
1452
1453static const struct file_operations proc_pid_sched_operations = {
1454        .open           = sched_open,
1455        .read           = seq_read,
1456        .write          = sched_write,
1457        .llseek         = seq_lseek,
1458        .release        = single_release,
1459};
1460
1461#endif
1462
1463#ifdef CONFIG_SCHED_AUTOGROUP
1464/*
1465 * Print out autogroup related information:
1466 */
1467static int sched_autogroup_show(struct seq_file *m, void *v)
1468{
1469        struct inode *inode = m->private;
1470        struct task_struct *p;
1471
1472        p = get_proc_task(inode);
1473        if (!p)
1474                return -ESRCH;
1475        proc_sched_autogroup_show_task(p, m);
1476
1477        put_task_struct(p);
1478
1479        return 0;
1480}
1481
1482static ssize_t
1483sched_autogroup_write(struct file *file, const char __user *buf,
1484            size_t count, loff_t *offset)
1485{
1486        struct inode *inode = file_inode(file);
1487        struct task_struct *p;
1488        char buffer[PROC_NUMBUF];
1489        int nice;
1490        int err;
1491
1492        memset(buffer, 0, sizeof(buffer));
1493        if (count > sizeof(buffer) - 1)
1494                count = sizeof(buffer) - 1;
1495        if (copy_from_user(buffer, buf, count))
1496                return -EFAULT;
1497
1498        err = kstrtoint(strstrip(buffer), 0, &nice);
1499        if (err < 0)
1500                return err;
1501
1502        p = get_proc_task(inode);
1503        if (!p)
1504                return -ESRCH;
1505
1506        err = proc_sched_autogroup_set_nice(p, nice);
1507        if (err)
1508                count = err;
1509
1510        put_task_struct(p);
1511
1512        return count;
1513}
1514
1515static int sched_autogroup_open(struct inode *inode, struct file *filp)
1516{
1517        int ret;
1518
1519        ret = single_open(filp, sched_autogroup_show, NULL);
1520        if (!ret) {
1521                struct seq_file *m = filp->private_data;
1522
1523                m->private = inode;
1524        }
1525        return ret;
1526}
1527
1528static const struct file_operations proc_pid_sched_autogroup_operations = {
1529        .open           = sched_autogroup_open,
1530        .read           = seq_read,
1531        .write          = sched_autogroup_write,
1532        .llseek         = seq_lseek,
1533        .release        = single_release,
1534};
1535
1536#endif /* CONFIG_SCHED_AUTOGROUP */
1537
1538#ifdef CONFIG_TIME_NS
1539static int timens_offsets_show(struct seq_file *m, void *v)
1540{
1541        struct task_struct *p;
1542
1543        p = get_proc_task(file_inode(m->file));
1544        if (!p)
1545                return -ESRCH;
1546        proc_timens_show_offsets(p, m);
1547
1548        put_task_struct(p);
1549
1550        return 0;
1551}
1552
1553static ssize_t timens_offsets_write(struct file *file, const char __user *buf,
1554                                    size_t count, loff_t *ppos)
1555{
1556        struct inode *inode = file_inode(file);
1557        struct proc_timens_offset offsets[2];
1558        char *kbuf = NULL, *pos, *next_line;
1559        struct task_struct *p;
1560        int ret, noffsets;
1561
1562        /* Only allow < page size writes at the beginning of the file */
1563        if ((*ppos != 0) || (count >= PAGE_SIZE))
1564                return -EINVAL;
1565
1566        /* Slurp in the user data */
1567        kbuf = memdup_user_nul(buf, count);
1568        if (IS_ERR(kbuf))
1569                return PTR_ERR(kbuf);
1570
1571        /* Parse the user data */
1572        ret = -EINVAL;
1573        noffsets = 0;
1574        for (pos = kbuf; pos; pos = next_line) {
1575                struct proc_timens_offset *off = &offsets[noffsets];
1576                char clock[10];
1577                int err;
1578
1579                /* Find the end of line and ensure we don't look past it */
1580                next_line = strchr(pos, '\n');
1581                if (next_line) {
1582                        *next_line = '\0';
1583                        next_line++;
1584                        if (*next_line == '\0')
1585                                next_line = NULL;
1586                }
1587
1588                err = sscanf(pos, "%9s %lld %lu", clock,
1589                                &off->val.tv_sec, &off->val.tv_nsec);
1590                if (err != 3 || off->val.tv_nsec >= NSEC_PER_SEC)
1591                        goto out;
1592
1593                clock[sizeof(clock) - 1] = 0;
1594                if (strcmp(clock, "monotonic") == 0 ||
1595                    strcmp(clock, __stringify(CLOCK_MONOTONIC)) == 0)
1596                        off->clockid = CLOCK_MONOTONIC;
1597                else if (strcmp(clock, "boottime") == 0 ||
1598                         strcmp(clock, __stringify(CLOCK_BOOTTIME)) == 0)
1599                        off->clockid = CLOCK_BOOTTIME;
1600                else
1601                        goto out;
1602
1603                noffsets++;
1604                if (noffsets == ARRAY_SIZE(offsets)) {
1605                        if (next_line)
1606                                count = next_line - kbuf;
1607                        break;
1608                }
1609        }
1610
1611        ret = -ESRCH;
1612        p = get_proc_task(inode);
1613        if (!p)
1614                goto out;
1615        ret = proc_timens_set_offset(file, p, offsets, noffsets);
1616        put_task_struct(p);
1617        if (ret)
1618                goto out;
1619
1620        ret = count;
1621out:
1622        kfree(kbuf);
1623        return ret;
1624}
1625
1626static int timens_offsets_open(struct inode *inode, struct file *filp)
1627{
1628        return single_open(filp, timens_offsets_show, inode);
1629}
1630
1631static const struct file_operations proc_timens_offsets_operations = {
1632        .open           = timens_offsets_open,
1633        .read           = seq_read,
1634        .write          = timens_offsets_write,
1635        .llseek         = seq_lseek,
1636        .release        = single_release,
1637};
1638#endif /* CONFIG_TIME_NS */
1639
1640static ssize_t comm_write(struct file *file, const char __user *buf,
1641                                size_t count, loff_t *offset)
1642{
1643        struct inode *inode = file_inode(file);
1644        struct task_struct *p;
1645        char buffer[TASK_COMM_LEN];
1646        const size_t maxlen = sizeof(buffer) - 1;
1647
1648        memset(buffer, 0, sizeof(buffer));
1649        if (copy_from_user(buffer, buf, count > maxlen ? maxlen : count))
1650                return -EFAULT;
1651
1652        p = get_proc_task(inode);
1653        if (!p)
1654                return -ESRCH;
1655
1656        if (same_thread_group(current, p))
1657                set_task_comm(p, buffer);
1658        else
1659                count = -EINVAL;
1660
1661        put_task_struct(p);
1662
1663        return count;
1664}
1665
1666static int comm_show(struct seq_file *m, void *v)
1667{
1668        struct inode *inode = m->private;
1669        struct task_struct *p;
1670
1671        p = get_proc_task(inode);
1672        if (!p)
1673                return -ESRCH;
1674
1675        proc_task_name(m, p, false);
1676        seq_putc(m, '\n');
1677
1678        put_task_struct(p);
1679
1680        return 0;
1681}
1682
1683static int comm_open(struct inode *inode, struct file *filp)
1684{
1685        return single_open(filp, comm_show, inode);
1686}
1687
1688static const struct file_operations proc_pid_set_comm_operations = {
1689        .open           = comm_open,
1690        .read           = seq_read,
1691        .write          = comm_write,
1692        .llseek         = seq_lseek,
1693        .release        = single_release,
1694};
1695
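/*
 * proc_get_link callback for /proc/<pid>/exe: hand back a reference to
 * the path of the task's executable, or -ENOENT if the task or its exe
 * file is gone.
 */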
1696static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
1697{
1698        struct task_struct *task;
1699        struct file *exe_file;
1700
1701        task = get_proc_task(d_inode(dentry));
1702        if (!task)
1703                return -ENOENT;
1704        exe_file = get_task_exe_file(task);
1705        put_task_struct(task);
1706        if (exe_file) {
1707                *exe_path = exe_file->f_path;
1708                path_get(&exe_file->f_path);
1709                fput(exe_file);
1710                return 0;
1711        } else
1712                return -ENOENT;
1713}
1714
1715static const char *proc_pid_get_link(struct dentry *dentry,
1716                                     struct inode *inode,
1717                                     struct delayed_call *done)
1718{
1719        struct path path;
1720        int error = -EACCES;
1721
1722        if (!dentry)
1723                return ERR_PTR(-ECHILD);
1724
1725        /* Are we allowed to snoop on the task's file descriptors? */
1726        if (!proc_fd_access_allowed(inode))
1727                goto out;
1728
1729        error = PROC_I(inode)->op.proc_get_link(dentry, &path);
1730        if (error)
1731                goto out;
1732
1733        error = nd_jump_link(&path);
1734out:
1735        return ERR_PTR(error);
1736}
1737
1738static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
1739{
1740        char *tmp = (char *)__get_free_page(GFP_KERNEL);
1741        char *pathname;
1742        int len;
1743
1744        if (!tmp)
1745                return -ENOMEM;
1746
1747        pathname = d_path(path, tmp, PAGE_SIZE);
1748        len = PTR_ERR(pathname);
1749        if (IS_ERR(pathname))
1750                goto out;
1751        len = tmp + PAGE_SIZE - 1 - pathname;
1752
1753        if (len > buflen)
1754                len = buflen;
1755        if (copy_to_user(buffer, pathname, len))
1756                len = -EFAULT;
1757 out:
1758        free_page((unsigned long)tmp);
1759        return len;
1760}
1761
1762static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
1763{
1764        int error = -EACCES;
1765        struct inode *inode = d_inode(dentry);
1766        struct path path;
1767
1768        /* Are we allowed to snoop on the task's file descriptors? */
1769        if (!proc_fd_access_allowed(inode))
1770                goto out;
1771
1772        error = PROC_I(inode)->op.proc_get_link(dentry, &path);
1773        if (error)
1774                goto out;
1775
1776        error = do_proc_readlink(&path, buffer, buflen);
1777        path_put(&path);
1778out:
1779        return error;
1780}
1781
1782const struct inode_operations proc_pid_link_inode_operations = {
1783        .readlink       = proc_pid_readlink,
1784        .get_link       = proc_pid_get_link,
1785        .setattr        = proc_setattr,
1786};
1787
1788
1789/* building an inode */
1790
1791void task_dump_owner(struct task_struct *task, umode_t mode,
1792                     kuid_t *ruid, kgid_t *rgid)
1793{
1794        /* Depending on the state of dumpable, compute who should own a
1795         * proc file for a task.
1796         */
1797        const struct cred *cred;
1798        kuid_t uid;
1799        kgid_t gid;
1800
1801        if (unlikely(task->flags & PF_KTHREAD)) {
1802                *ruid = GLOBAL_ROOT_UID;
1803                *rgid = GLOBAL_ROOT_GID;
1804                return;
1805        }
1806
1807        /* Default to the task's effective ownership */
1808        rcu_read_lock();
1809        cred = __task_cred(task);
1810        uid = cred->euid;
1811        gid = cred->egid;
1812        rcu_read_unlock();
1813
1814        /*
1815         * Before the /proc/pid/status file was created the only way to read
1816         * the effective uid of a process was to stat /proc/pid.  Reading
1817         * /proc/pid/status is slow enough that procps and other packages
1818         * kept stating /proc/pid.  To keep the rules in /proc simple I have
1819         * made this apply to all per process world readable and executable
1820         * directories.
1821         */
1822        if (mode != (S_IFDIR|S_IRUGO|S_IXUGO)) {
1823                struct mm_struct *mm;
1824                task_lock(task);
1825                mm = task->mm;
1826                /* Make non-dumpable tasks owned by some root */
1827                if (mm) {
1828                        if (get_dumpable(mm) != SUID_DUMP_USER) {
1829                                struct user_namespace *user_ns = mm->user_ns;
1830
1831                                uid = make_kuid(user_ns, 0);
1832                                if (!uid_valid(uid))
1833                                        uid = GLOBAL_ROOT_UID;
1834
1835                                gid = make_kgid(user_ns, 0);
1836                                if (!gid_valid(gid))
1837                                        gid = GLOBAL_ROOT_GID;
1838                        }
1839                } else {
1840                        uid = GLOBAL_ROOT_UID;
1841                        gid = GLOBAL_ROOT_GID;
1842                }
1843                task_unlock(task);
1844        }
1845        *ruid = uid;
1846        *rgid = gid;
1847}
1848
1849void proc_pid_evict_inode(struct proc_inode *ei)
1850{
1851        struct pid *pid = ei->pid;
1852
1853        if (S_ISDIR(ei->vfs_inode.i_mode)) {
1854                spin_lock(&pid->lock);
1855                hlist_del_init_rcu(&ei->sibling_inodes);
1856                spin_unlock(&pid->lock);
1857        }
1858
1859        put_pid(pid);
1860}
1861
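/*
 * Allocate and initialise a proc inode for @task.  The inode number is
 * generated on the fly, a reference to the task's struct pid is stored in
 * the proc_inode, and directory inodes are additionally linked into the
 * pid's inode list so proc_flush_pid() can invalidate them later.
 */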
1862struct inode *proc_pid_make_inode(struct super_block * sb,
1863                                  struct task_struct *task, umode_t mode)
1864{
1865        struct inode * inode;
1866        struct proc_inode *ei;
1867        struct pid *pid;
1868
1869        /* We need a new inode */
1870
1871        inode = new_inode(sb);
1872        if (!inode)
1873                goto out;
1874
1875        /* Common stuff */
1876        ei = PROC_I(inode);
1877        inode->i_mode = mode;
1878        inode->i_ino = get_next_ino();
1879        inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
1880        inode->i_op = &proc_def_inode_operations;
1881
1882        /*
1883         * Grab a reference to the task's pid.
1884         */
1885        pid = get_task_pid(task, PIDTYPE_PID);
1886        if (!pid)
1887                goto out_unlock;
1888
1889        /* Let the pid remember us for quick removal */
1890        ei->pid = pid;
1891        if (S_ISDIR(mode)) {
1892                spin_lock(&pid->lock);
1893                hlist_add_head_rcu(&ei->sibling_inodes, &pid->inodes);
1894                spin_unlock(&pid->lock);
1895        }
1896
1897        task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);
1898        security_task_to_inode(task, inode);
1899
1900out:
1901        return inode;
1902
1903out_unlock:
1904        iput(inode);
1905        return NULL;
1906}
1907
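/*
 * getattr() for per-pid inodes: report the ownership computed by
 * task_dump_owner() and return -ENOENT for tasks the caller is not
 * allowed to see under hidepid, keeping getattr() consistent with
 * readdir().
 */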
1908int pid_getattr(const struct path *path, struct kstat *stat,
1909                u32 request_mask, unsigned int query_flags)
1910{
1911        struct inode *inode = d_inode(path->dentry);
1912        struct pid_namespace *pid = proc_pid_ns(inode);
1913        struct task_struct *task;
1914
1915        generic_fillattr(inode, stat);
1916
1917        stat->uid = GLOBAL_ROOT_UID;
1918        stat->gid = GLOBAL_ROOT_GID;
1919        rcu_read_lock();
1920        task = pid_task(proc_pid(inode), PIDTYPE_PID);
1921        if (task) {
1922                if (!has_pid_permissions(pid, task, HIDEPID_INVISIBLE)) {
1923                        rcu_read_unlock();
1924                        /*
1925                         * This doesn't prevent learning whether the PID exists;
1926                         * it only makes getattr() consistent with readdir().
1927                         */
1928                        return -ENOENT;
1929                }
1930                task_dump_owner(task, inode->i_mode, &stat->uid, &stat->gid);
1931        }
1932        rcu_read_unlock();
1933        return 0;
1934}
1935
1936/* dentry stuff */
1937
1938/*
1939 * Set <pid>/... inode ownership (can change due to setuid(), etc.)
1940 */
1941void pid_update_inode(struct task_struct *task, struct inode *inode)
1942{
1943        task_dump_owner(task, inode->i_mode, &inode->i_uid, &inode->i_gid);
1944
1945        inode->i_mode &= ~(S_ISUID | S_ISGID);
1946        security_task_to_inode(task, inode);
1947}
1948
1949/*
1950 * Rewrite the inode's ownerships here because the owning task may have
1951 * performed a setuid(), etc.
1952 *
1953 */
1954static int pid_revalidate(struct dentry *dentry, unsigned int flags)
1955{
1956        struct inode *inode;
1957        struct task_struct *task;
1958
1959        if (flags & LOOKUP_RCU)
1960                return -ECHILD;
1961
1962        inode = d_inode(dentry);
1963        task = get_proc_task(inode);
1964
1965        if (task) {
1966                pid_update_inode(task, inode);
1967                put_task_struct(task);
1968                return 1;
1969        }
1970        return 0;
1971}
1972
1973static inline bool proc_inode_is_dead(struct inode *inode)
1974{
1975        return !proc_pid(inode)->tasks[PIDTYPE_PID].first;
1976}
1977
1978int pid_delete_dentry(const struct dentry *dentry)
1979{
1980        /* Is the task we represent dead?
1981         * If so, then don't put the dentry on the LRU list;
1982         * kill it immediately.
1983         */
1984        return proc_inode_is_dead(d_inode(dentry));
1985}
1986
1987const struct dentry_operations pid_dentry_operations =
1988{
1989        .d_revalidate   = pid_revalidate,
1990        .d_delete       = pid_delete_dentry,
1991};
1992
1993/* Lookups */
1994
1995/*
1996 * Fill a directory entry.
1997 *
1998 * If possible, create the dcache entry and derive our inode number and
1999 * file type from the dcache entry.
2000 *
2001 * Since all of the proc inode numbers are dynamically generated, the inode
2002 * numbers do not exist until the inode is cached.  This means creating
2003 * the dcache entry in readdir is necessary to keep the inode numbers
2004 * reported by readdir in sync with the inode numbers reported
2005 * by stat.
2006 */
2007bool proc_fill_cache(struct file *file, struct dir_context *ctx,
2008        const char *name, unsigned int len,
2009        instantiate_t instantiate, struct task_struct *task, const void *ptr)
2010{
2011        struct dentry *child, *dir = file->f_path.dentry;
2012        struct qstr qname = QSTR_INIT(name, len);
2013        struct inode *inode;
2014        unsigned type = DT_UNKNOWN;
2015        ino_t ino = 1;
2016
2017        child = d_hash_and_lookup(dir, &qname);
2018        if (!child) {
2019                DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
2020                child = d_alloc_parallel(dir, &qname, &wq);
2021                if (IS_ERR(child))
2022                        goto end_instantiate;
2023                if (d_in_lookup(child)) {
2024                        struct dentry *res;
2025                        res = instantiate(child, task, ptr);
2026                        d_lookup_done(child);
2027                        if (unlikely(res)) {
2028                                dput(child);
2029                                child = res;
2030                                if (IS_ERR(child))
2031                                        goto end_instantiate;
2032                        }
2033                }
2034        }
2035        inode = d_inode(child);
2036        ino = inode->i_ino;
2037        type = inode->i_mode >> 12;
2038        dput(child);
2039end_instantiate:
2040        return dir_emit(ctx, name, len, ino, type);
2041}
2042
2043/*
2044 * dname_to_vma_addr - maps a dentry name into two unsigned longs
2045 * which represent vma start and end addresses.
2046 */
2047static int dname_to_vma_addr(struct dentry *dentry,
2048                             unsigned long *start, unsigned long *end)
2049{
2050        const char *str = dentry->d_name.name;
2051        unsigned long long sval, eval;
2052        unsigned int len;
2053
2054        if (str[0] == '0' && str[1] != '-')
2055                return -EINVAL;
2056        len = _parse_integer(str, 16, &sval);
2057        if (len & KSTRTOX_OVERFLOW)
2058                return -EINVAL;
2059        if (sval != (unsigned long)sval)
2060                return -EINVAL;
2061        str += len;
2062
2063        if (*str != '-')
2064                return -EINVAL;
2065        str++;
2066
2067        if (str[0] == '0' && str[1])
2068                return -EINVAL;
2069        len = _parse_integer(str, 16, &eval);
2070        if (len & KSTRTOX_OVERFLOW)
2071                return -EINVAL;
2072        if (eval != (unsigned long)eval)
2073                return -EINVAL;
2074        str += len;
2075
2076        if (*str != '\0')
2077                return -EINVAL;
2078
2079        *start = sval;
2080        *end = eval;
2081
2082        return 0;
2083}
2084
2085static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
2086{
2087        unsigned long vm_start, vm_end;
2088        bool exact_vma_exists = false;
2089        struct mm_struct *mm = NULL;
2090        struct task_struct *task;
2091        struct inode *inode;
2092        int status = 0;
2093
2094        if (flags & LOOKUP_RCU)
2095                return -ECHILD;
2096
2097        inode = d_inode(dentry);
2098        task = get_proc_task(inode);
2099        if (!task)
2100                goto out_notask;
2101
2102        mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
2103        if (IS_ERR_OR_NULL(mm))
2104                goto out;
2105
2106        if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
2107                status = down_read_killable(&mm->mmap_sem);
2108                if (!status) {
2109                        exact_vma_exists = !!find_exact_vma(mm, vm_start,
2110                                                            vm_end);
2111                        up_read(&mm->mmap_sem);
2112                }
2113        }
2114
2115        mmput(mm);
2116
2117        if (exact_vma_exists) {
2118                task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);
2119
2120                security_task_to_inode(task, inode);
2121                status = 1;
2122        }
2123
2124out:
2125        put_task_struct(task);
2126
2127out_notask:
2128        return status;
2129}
2130
2131static const struct dentry_operations tid_map_files_dentry_operations = {
2132        .d_revalidate   = map_files_d_revalidate,
2133        .d_delete       = pid_delete_dentry,
2134};
2135
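/*
 * Resolve a /proc/<pid>/map_files/<start>-<end> symlink to the file
 * backing the VMA that matches that address range exactly.
 */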
2136static int map_files_get_link(struct dentry *dentry, struct path *path)
2137{
2138        unsigned long vm_start, vm_end;
2139        struct vm_area_struct *vma;
2140        struct task_struct *task;
2141        struct mm_struct *mm;
2142        int rc;
2143
2144        rc = -ENOENT;
2145        task = get_proc_task(d_inode(dentry));
2146        if (!task)
2147                goto out;
2148
2149        mm = get_task_mm(task);
2150        put_task_struct(task);
2151        if (!mm)
2152                goto out;
2153
2154        rc = dname_to_vma_addr(dentry, &vm_start, &vm_end);
2155        if (rc)
2156                goto out_mmput;
2157
2158        rc = down_read_killable(&mm->mmap_sem);
2159        if (rc)
2160                goto out_mmput;
2161
2162        rc = -ENOENT;
2163        vma = find_exact_vma(mm, vm_start, vm_end);
2164        if (vma && vma->vm_file) {
2165                *path = vma->vm_file->f_path;
2166                path_get(path);
2167                rc = 0;
2168        }
2169        up_read(&mm->mmap_sem);
2170
2171out_mmput:
2172        mmput(mm);
2173out:
2174        return rc;
2175}
2176
2177struct map_files_info {
2178        unsigned long   start;
2179        unsigned long   end;
2180        fmode_t         mode;
2181};
2182
2183/*
2184 * Only allow CAP_SYS_ADMIN to follow the links, due to concerns about how the
2185 * symlinks may be used to bypass permissions on ancestor directories in the
2186 * path to the file in question.
2187 */
2188static const char *
2189proc_map_files_get_link(struct dentry *dentry,
2190                        struct inode *inode,
2191                        struct delayed_call *done)
2192{
2193        if (!capable(CAP_SYS_ADMIN))
2194                return ERR_PTR(-EPERM);
2195
2196        return proc_pid_get_link(dentry, inode, done);
2197}
2198
2199/*
2200 * Identical to proc_pid_link_inode_operations except for get_link()
2201 */
2202static const struct inode_operations proc_map_files_link_inode_operations = {
2203        .readlink       = proc_pid_readlink,
2204        .get_link       = proc_map_files_get_link,
2205        .setattr        = proc_setattr,
2206};
2207
2208static struct dentry *
2209proc_map_files_instantiate(struct dentry *dentry,
2210                           struct task_struct *task, const void *ptr)
2211{
2212        fmode_t mode = (fmode_t)(unsigned long)ptr;
2213        struct proc_inode *ei;
2214        struct inode *inode;
2215
2216        inode = proc_pid_make_inode(dentry->d_sb, task, S_IFLNK |
2217                                    ((mode & FMODE_READ ) ? S_IRUSR : 0) |
2218                                    ((mode & FMODE_WRITE) ? S_IWUSR : 0));
2219        if (!inode)
2220                return ERR_PTR(-ENOENT);
2221
2222        ei = PROC_I(inode);
2223        ei->op.proc_get_link = map_files_get_link;
2224
2225        inode->i_op = &proc_map_files_link_inode_operations;
2226        inode->i_size = 64;
2227
2228        d_set_d_op(dentry, &tid_map_files_dentry_operations);
2229        return d_splice_alias(inode, dentry);
2230}
2231
2232static struct dentry *proc_map_files_lookup(struct inode *dir,
2233                struct dentry *dentry, unsigned int flags)
2234{
2235        unsigned long vm_start, vm_end;
2236        struct vm_area_struct *vma;
2237        struct task_struct *task;
2238        struct dentry *result;
2239        struct mm_struct *mm;
2240
2241        result = ERR_PTR(-ENOENT);
2242        task = get_proc_task(dir);
2243        if (!task)
2244                goto out;
2245
2246        result = ERR_PTR(-EACCES);
2247        if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
2248                goto out_put_task;
2249
2250        result = ERR_PTR(-ENOENT);
2251        if (dname_to_vma_addr(dentry, &vm_start, &vm_end))
2252                goto out_put_task;
2253
2254        mm = get_task_mm(task);
2255        if (!mm)
2256                goto out_put_task;
2257
2258        result = ERR_PTR(-EINTR);
2259        if (down_read_killable(&mm->mmap_sem))
2260                goto out_put_mm;
2261
2262        result = ERR_PTR(-ENOENT);
2263        vma = find_exact_vma(mm, vm_start, vm_end);
2264        if (!vma)
2265                goto out_no_vma;
2266
2267        if (vma->vm_file)
2268                result = proc_map_files_instantiate(dentry, task,
2269                                (void *)(unsigned long)vma->vm_file->f_mode);
2270
2271out_no_vma:
2272        up_read(&mm->mmap_sem);
2273out_put_mm:
2274        mmput(mm);
2275out_put_task:
2276        put_task_struct(task);
2277out:
2278        return result;
2279}
2280
2281static const struct inode_operations proc_map_files_inode_operations = {
2282        .lookup         = proc_map_files_lookup,
2283        .permission     = proc_fd_permission,
2284        .setattr        = proc_setattr,
2285};
2286
2287static int
2288proc_map_files_readdir(struct file *file, struct dir_context *ctx)
2289{
2290        struct vm_area_struct *vma;
2291        struct task_struct *task;
2292        struct mm_struct *mm;
2293        unsigned long nr_files, pos, i;
2294        GENRADIX(struct map_files_info) fa;
2295        struct map_files_info *p;
2296        int ret;
2297
2298        genradix_init(&fa);
2299
2300        ret = -ENOENT;
2301        task = get_proc_task(file_inode(file));
2302        if (!task)
2303                goto out;
2304
2305        ret = -EACCES;
2306        if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
2307                goto out_put_task;
2308
2309        ret = 0;
2310        if (!dir_emit_dots(file, ctx))
2311                goto out_put_task;
2312
2313        mm = get_task_mm(task);
2314        if (!mm)
2315                goto out_put_task;
2316
2317        ret = down_read_killable(&mm->mmap_sem);
2318        if (ret) {
2319                mmput(mm);
2320                goto out_put_task;
2321        }
2322
2323        nr_files = 0;
2324
2325        /*
2326         * We need two passes here:
2327         *
2328         *  1) Collect vmas of mapped files with mmap_sem taken
2329         *  2) Release mmap_sem and instantiate entries
2330         *
2331         * otherwise lockdep complains, since the filldir()
2332         * routine might require mmap_sem to be taken in might_fault().
2333         */
2334
2335        for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
2336                if (!vma->vm_file)
2337                        continue;
2338                if (++pos <= ctx->pos)
2339                        continue;
2340
2341                p = genradix_ptr_alloc(&fa, nr_files++, GFP_KERNEL);
2342                if (!p) {
2343                        ret = -ENOMEM;
2344                        up_read(&mm->mmap_sem);
2345                        mmput(mm);
2346                        goto out_put_task;
2347                }
2348
2349                p->start = vma->vm_start;
2350                p->end = vma->vm_end;
2351                p->mode = vma->vm_file->f_mode;
2352        }
2353        up_read(&mm->mmap_sem);
2354        mmput(mm);
2355
2356        for (i = 0; i < nr_files; i++) {
2357                char buf[4 * sizeof(long) + 2]; /* max: %lx-%lx\0 */
2358                unsigned int len;
2359
2360                p = genradix_ptr(&fa, i);
2361                len = snprintf(buf, sizeof(buf), "%lx-%lx", p->start, p->end);
2362                if (!proc_fill_cache(file, ctx,
2363                                      buf, len,
2364                                      proc_map_files_instantiate,
2365                                      task,
2366                                      (void *)(unsigned long)p->mode))
2367                        break;
2368                ctx->pos++;
2369        }
2370
2371out_put_task:
2372        put_task_struct(task);
2373out:
2374        genradix_free(&fa);
2375        return ret;
2376}
2377
2378static const struct file_operations proc_map_files_operations = {
2379        .read           = generic_read_dir,
2380        .iterate_shared = proc_map_files_readdir,
2381        .llseek         = generic_file_llseek,
2382};
2383
2384#if defined(CONFIG_CHECKPOINT_RESTORE) && defined(CONFIG_POSIX_TIMERS)
2385struct timers_private {
2386        struct pid *pid;
2387        struct task_struct *task;
2388        struct sighand_struct *sighand;
2389        struct pid_namespace *ns;
2390        unsigned long flags;
2391};
2392
2393static void *timers_start(struct seq_file *m, loff_t *pos)
2394{
2395        struct timers_private *tp = m->private;
2396
2397        tp->task = get_pid_task(tp->pid, PIDTYPE_PID);
2398        if (!tp->task)
2399                return ERR_PTR(-ESRCH);
2400
2401        tp->sighand = lock_task_sighand(tp->task, &tp->flags);
2402        if (!tp->sighand)
2403                return ERR_PTR(-ESRCH);
2404
2405        return seq_list_start(&tp->task->signal->posix_timers, *pos);
2406}
2407
2408static void *timers_next(struct seq_file *m, void *v, loff_t *pos)
2409{
2410        struct timers_private *tp = m->private;
2411        return seq_list_next(v, &tp->task->signal->posix_timers, pos);
2412}
2413
2414static void timers_stop(struct seq_file *m, void *v)
2415{
2416        struct timers_private *tp = m->private;
2417
2418        if (tp->sighand) {
2419                unlock_task_sighand(tp->task, &tp->flags);
2420                tp->sighand = NULL;
2421        }
2422
2423        if (tp->task) {
2424                put_task_struct(tp->task);
2425                tp->task = NULL;
2426        }
2427}
2428
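/*
 * Show one posix timer for /proc/<pid>/timers: four lines per timer
 * (ID, signal, notify, ClockID), matching the seq_printf() calls below.
 */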
2429static int show_timer(struct seq_file *m, void *v)
2430{
2431        struct k_itimer *timer;
2432        struct timers_private *tp = m->private;
2433        int notify;
2434        static const char * const nstr[] = {
2435                [SIGEV_SIGNAL] = "signal",
2436                [SIGEV_NONE] = "none",
2437                [SIGEV_THREAD] = "thread",
2438        };
2439
2440        timer = list_entry((struct list_head *)v, struct k_itimer, list);
2441        notify = timer->it_sigev_notify;
2442
2443        seq_printf(m, "ID: %d\n", timer->it_id);
2444        seq_printf(m, "signal: %d/%px\n",
2445                   timer->sigq->info.si_signo,
2446                   timer->sigq->info.si_value.sival_ptr);
2447        seq_printf(m, "notify: %s/%s.%d\n",
2448                   nstr[notify & ~SIGEV_THREAD_ID],
2449                   (notify & SIGEV_THREAD_ID) ? "tid" : "pid",
2450                   pid_nr_ns(timer->it_pid, tp->ns));
2451        seq_printf(m, "ClockID: %d\n", timer->it_clock);
2452
2453        return 0;
2454}
2455
2456static const struct seq_operations proc_timers_seq_ops = {
2457        .start  = timers_start,
2458        .next   = timers_next,
2459        .stop   = timers_stop,
2460        .show   = show_timer,
2461};
2462
2463static int proc_timers_open(struct inode *inode, struct file *file)
2464{
2465        struct timers_private *tp;
2466
2467        tp = __seq_open_private(file, &proc_timers_seq_ops,
2468                        sizeof(struct timers_private));
2469        if (!tp)
2470                return -ENOMEM;
2471
2472        tp->pid = proc_pid(inode);
2473        tp->ns = proc_pid_ns(inode);
2474        return 0;
2475}
2476
2477static const struct file_operations proc_timers_operations = {
2478        .open           = proc_timers_open,
2479        .read           = seq_read,
2480        .llseek         = seq_lseek,
2481        .release        = seq_release_private,
2482};
2483#endif
2484
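/*
 * Write handler for /proc/<pid>/timerslack_ns.  Writing 0 restores the
 * task's default timer slack, any other value becomes the new slack in
 * nanoseconds.  Changing another task's slack requires CAP_SYS_NICE in
 * that task's user namespace and must pass the scheduler security hook,
 * e.g.:
 *
 *	echo 50000 > /proc/<pid>/timerslack_ns
 */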
2485static ssize_t timerslack_ns_write(struct file *file, const char __user *buf,
2486                                        size_t count, loff_t *offset)
2487{
2488        struct inode *inode = file_inode(file);
2489        struct task_struct *p;
2490        u64 slack_ns;
2491        int err;
2492
2493        err = kstrtoull_from_user(buf, count, 10, &slack_ns);
2494        if (err < 0)
2495                return err;
2496
2497        p = get_proc_task(inode);
2498        if (!p)
2499                return -ESRCH;
2500
2501        if (p != current) {
2502                rcu_read_lock();
2503                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
2504                        rcu_read_unlock();
2505                        count = -EPERM;
2506                        goto out;
2507                }
2508                rcu_read_unlock();
2509
2510                err = security_task_setscheduler(p);
2511                if (err) {
2512                        count = err;
2513                        goto out;
2514                }
2515        }
2516
2517        task_lock(p);
2518        if (slack_ns == 0)
2519                p->timer_slack_ns = p->default_timer_slack_ns;
2520        else
2521                p->timer_slack_ns = slack_ns;
2522        task_unlock(p);
2523
2524out:
2525        put_task_struct(p);
2526
2527        return count;
2528}
2529
2530static int timerslack_ns_show(struct seq_file *m, void *v)
2531{
2532        struct inode *inode = m->private;
2533        struct task_struct *p;
2534        int err = 0;
2535
2536        p = get_proc_task(inode);
2537        if (!p)
2538                return -ESRCH;
2539
2540        if (p != current) {
2541                rcu_read_lock();
2542                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
2543                        rcu_read_unlock();
2544                        err = -EPERM;
2545                        goto out;
2546                }
2547                rcu_read_unlock();
2548
2549                err = security_task_getscheduler(p);
2550                if (err)
2551                        goto out;
2552        }
2553
2554        task_lock(p);
2555        seq_printf(m, "%llu\n", p->timer_slack_ns);
2556        task_unlock(p);
2557
2558out:
2559        put_task_struct(p);
2560
2561        return err;
2562}
2563
2564static int timerslack_ns_open(struct inode *inode, struct file *filp)
2565{
2566        return single_open(filp, timerslack_ns_show, inode);
2567}
2568
2569static const struct file_operations proc_pid_set_timerslack_ns_operations = {
2570        .open           = timerslack_ns_open,
2571        .read           = seq_read,
2572        .write          = timerslack_ns_write,
2573        .llseek         = seq_lseek,
2574        .release        = single_release,
2575};
2576
2577static struct dentry *proc_pident_instantiate(struct dentry *dentry,
2578        struct task_struct *task, const void *ptr)
2579{
2580        const struct pid_entry *p = ptr;
2581        struct inode *inode;
2582        struct proc_inode *ei;
2583
2584        inode = proc_pid_make_inode(dentry->d_sb, task, p->mode);
2585        if (!inode)
2586                return ERR_PTR(-ENOENT);
2587
2588        ei = PROC_I(inode);
2589        if (S_ISDIR(inode->i_mode))
2590                set_nlink(inode, 2);    /* Use getattr to fix if necessary */
2591        if (p->iop)
2592                inode->i_op = p->iop;
2593        if (p->fop)
2594                inode->i_fop = p->fop;
2595        ei->op = p->op;
2596        pid_update_inode(task, inode);
2597        d_set_d_op(dentry, &pid_dentry_operations);
2598        return d_splice_alias(inode, dentry);
2599}
2600
2601static struct dentry *proc_pident_lookup(struct inode *dir, 
2602                                         struct dentry *dentry,
2603                                         const struct pid_entry *p,
2604                                         const struct pid_entry *end)
2605{
2606        struct task_struct *task = get_proc_task(dir);
2607        struct dentry *res = ERR_PTR(-ENOENT);
2608
2609        if (!task)
2610                goto out_no_task;
2611
2612        /*
2613         * Yes, it does not scale. And it should not. Don't add
2614         * new entries into /proc/<tgid>/ without very good reasons.
2615         */
2616        for (; p < end; p++) {
2617                if (p->len != dentry->d_name.len)
2618                        continue;
2619                if (!memcmp(dentry->d_name.name, p->name, p->len)) {
2620                        res = proc_pident_instantiate(dentry, task, p);
2621                        break;
2622                }
2623        }
2624        put_task_struct(task);
2625out_no_task:
2626        return res;
2627}
2628
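/*
 * Emit the entries of a static pid_entry table, resuming at ctx->pos
 * (positions 0 and 1 are "." and "..").
 */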
2629static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
2630                const struct pid_entry *ents, unsigned int nents)
2631{
2632        struct task_struct *task = get_proc_task(file_inode(file));
2633        const struct pid_entry *p;
2634
2635        if (!task)
2636                return -ENOENT;
2637
2638        if (!dir_emit_dots(file, ctx))
2639                goto out;
2640
2641        if (ctx->pos >= nents + 2)
2642                goto out;
2643
2644        for (p = ents + (ctx->pos - 2); p < ents + nents; p++) {
2645                if (!proc_fill_cache(file, ctx, p->name, p->len,
2646                                proc_pident_instantiate, task, p))
2647                        break;
2648                ctx->pos++;
2649        }
2650out:
2651        put_task_struct(task);
2652        return 0;
2653}
2654
2655#ifdef CONFIG_SECURITY
2656static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
2657                                  size_t count, loff_t *ppos)
2658{
2659        struct inode * inode = file_inode(file);
2660        char *p = NULL;
2661        ssize_t length;
2662        struct task_struct *task = get_proc_task(inode);
2663
2664        if (!task)
2665                return -ESRCH;
2666
2667        length = security_getprocattr(task, PROC_I(inode)->op.lsm,
2668                                      (char*)file->f_path.dentry->d_name.name,
2669                                      &p);
2670        put_task_struct(task);
2671        if (length > 0)
2672                length = simple_read_from_buffer(buf, count, ppos, p, length);
2673        kfree(p);
2674        return length;
2675}
2676
2677static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
2678                                   size_t count, loff_t *ppos)
2679{
2680        struct inode * inode = file_inode(file);
2681        struct task_struct *task;
2682        void *page;
2683        int rv;
2684
2685        rcu_read_lock();
2686        task = pid_task(proc_pid(inode), PIDTYPE_PID);
2687        if (!task) {
2688                rcu_read_unlock();
2689                return -ESRCH;
2690        }
2691        /* A task may only write its own attributes. */
2692        if (current != task) {
2693                rcu_read_unlock();
2694                return -EACCES;
2695        }
2696        /* Prevent changes to overridden credentials. */
2697        if (current_cred() != current_real_cred()) {
2698                rcu_read_unlock();
2699                return -EBUSY;
2700        }
2701        rcu_read_unlock();
2702
2703        if (count > PAGE_SIZE)
2704                count = PAGE_SIZE;
2705
2706        /* No partial writes. */
2707        if (*ppos != 0)
2708                return -EINVAL;
2709
2710        page = memdup_user(buf, count);
2711        if (IS_ERR(page)) {
2712                rv = PTR_ERR(page);
2713                goto out;
2714        }
2715
2716        /* Guard against adverse ptrace interaction */
2717        rv = mutex_lock_interruptible(&current->signal->cred_guard_mutex);
2718        if (rv < 0)
2719                goto out_free;
2720
2721        rv = security_setprocattr(PROC_I(inode)->op.lsm,
2722                                  file->f_path.dentry->d_name.name, page,
2723                                  count);
2724        mutex_unlock(&current->signal->cred_guard_mutex);
2725out_free:
2726        kfree(page);
2727out:
2728        return rv;
2729}
2730
2731static const struct file_operations proc_pid_attr_operations = {
2732        .read           = proc_pid_attr_read,
2733        .write          = proc_pid_attr_write,
2734        .llseek         = generic_file_llseek,
2735};
2736
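/*
 * LSM_DIR_OPS() generates the lookup/readdir glue for a per-LSM
 * subdirectory of /proc/<pid>/attr, driven by a <lsm>_attr_dir_stuff[]
 * pid_entry table (see the smack instance below).
 */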
2737#define LSM_DIR_OPS(LSM) \
2738static int proc_##LSM##_attr_dir_iterate(struct file *filp, \
2739                             struct dir_context *ctx) \
2740{ \
2741        return proc_pident_readdir(filp, ctx, \
2742                                   LSM##_attr_dir_stuff, \
2743                                   ARRAY_SIZE(LSM##_attr_dir_stuff)); \
2744} \
2745\
2746static const struct file_operations proc_##LSM##_attr_dir_ops = { \
2747        .read           = generic_read_dir, \
2748        .iterate        = proc_##LSM##_attr_dir_iterate, \
2749        .llseek         = default_llseek, \
2750}; \
2751\
2752static struct dentry *proc_##LSM##_attr_dir_lookup(struct inode *dir, \
2753                                struct dentry *dentry, unsigned int flags) \
2754{ \
2755        return proc_pident_lookup(dir, dentry, \
2756                                  LSM##_attr_dir_stuff, \
2757                                  LSM##_attr_dir_stuff + ARRAY_SIZE(LSM##_attr_dir_stuff)); \
2758} \
2759\
2760static const struct inode_operations proc_##LSM##_attr_dir_inode_ops = { \
2761        .lookup         = proc_##LSM##_attr_dir_lookup, \
2762        .getattr        = pid_getattr, \
2763        .setattr        = proc_setattr, \
2764}
2765
2766#ifdef CONFIG_SECURITY_SMACK
2767static const struct pid_entry smack_attr_dir_stuff[] = {
2768        ATTR("smack", "current",        0666),
2769};
2770LSM_DIR_OPS(smack);
2771#endif
2772
2773static const struct pid_entry attr_dir_stuff[] = {
2774        ATTR(NULL, "current",           0666),
2775        ATTR(NULL, "prev",              0444),
2776        ATTR(NULL, "exec",              0666),
2777        ATTR(NULL, "fscreate",          0666),
2778        ATTR(NULL, "keycreate",         0666),
2779        ATTR(NULL, "sockcreate",        0666),
2780#ifdef CONFIG_SECURITY_SMACK
2781        DIR("smack",                    0555,
2782            proc_smack_attr_dir_inode_ops, proc_smack_attr_dir_ops),
2783#endif
2784};
2785
2786static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx)
2787{
2788        return proc_pident_readdir(file, ctx, 
2789                                   attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
2790}
2791
2792static const struct file_operations proc_attr_dir_operations = {
2793        .read           = generic_read_dir,
2794        .iterate_shared = proc_attr_dir_readdir,
2795        .llseek         = generic_file_llseek,
2796};
2797
2798static struct dentry *proc_attr_dir_lookup(struct inode *dir,
2799                                struct dentry *dentry, unsigned int flags)
2800{
2801        return proc_pident_lookup(dir, dentry,
2802                                  attr_dir_stuff,
2803                                  attr_dir_stuff + ARRAY_SIZE(attr_dir_stuff));
2804}
2805
2806static const struct inode_operations proc_attr_dir_inode_operations = {
2807        .lookup         = proc_attr_dir_lookup,
2808        .getattr        = pid_getattr,
2809        .setattr        = proc_setattr,
2810};
2811
2812#endif
2813
2814#ifdef CONFIG_ELF_CORE
2815static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
2816                                         size_t count, loff_t *ppos)
2817{
2818        struct task_struct *task = get_proc_task(file_inode(file));
2819        struct mm_struct *mm;
2820        char buffer[PROC_NUMBUF];
2821        size_t len;
2822        int ret;
2823
2824        if (!task)
2825                return -ESRCH;
2826
2827        ret = 0;
2828        mm = get_task_mm(task);
2829        if (mm) {
2830                len = snprintf(buffer, sizeof(buffer), "%08lx\n",
2831                               ((mm->flags & MMF_DUMP_FILTER_MASK) >>
2832                                MMF_DUMP_FILTER_SHIFT));
2833                mmput(mm);
2834                ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
2835        }
2836
2837        put_task_struct(task);
2838
2839        return ret;
2840}
2841
2842static ssize_t proc_coredump_filter_write(struct file *file,
2843                                          const char __user *buf,
2844                                          size_t count,
2845                                          loff_t *ppos)
2846{
2847        struct task_struct *task;
2848        struct mm_struct *mm;
2849        unsigned int val;
2850        int ret;
2851        int i;
2852        unsigned long mask;
2853
2854        ret = kstrtouint_from_user(buf, count, 0, &val);
2855        if (ret < 0)
2856                return ret;
2857
2858        ret = -ESRCH;
2859        task = get_proc_task(file_inode(file));
2860        if (!task)
2861                goto out_no_task;
2862
2863        mm = get_task_mm(task);
2864        if (!mm)
2865                goto out_no_mm;
2866        ret = 0;
2867
2868        for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
2869                if (val & mask)
2870                        set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
2871                else
2872                        clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
2873        }
2874
2875        mmput(mm);
2876 out_no_mm:
2877        put_task_struct(task);
2878 out_no_task:
2879        if (ret < 0)
2880                return ret;
2881        return count;
2882}
2883
2884static const struct file_operations proc_coredump_filter_operations = {
2885        .read           = proc_coredump_filter_read,
2886        .write          = proc_coredump_filter_write,
2887        .llseek         = generic_file_llseek,
2888};
2889#endif
2890
2891#ifdef CONFIG_TASK_IO_ACCOUNTING
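/*
 * Common helper for /proc/<pid>/io.  With @whole set, the counters of
 * every thread in the group (plus the signal struct's accumulated ioac)
 * are summed; otherwise only the given task's counters are reported.
 * Access is gated by a ptrace_may_access() check.
 */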
2892static int do_io_accounting(struct task_struct *task, struct seq_file *m, int whole)
2893{
2894        struct task_io_accounting acct = task->ioac;
2895        unsigned long flags;
2896        int result;
2897
2898        result = mutex_lock_killable(&task->signal->exec_update_mutex);
2899        if (result)
2900                return result;
2901
2902        if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
2903                result = -EACCES;
2904                goto out_unlock;
2905        }
2906
2907        if (whole && lock_task_sighand(task, &flags)) {
2908                struct task_struct *t = task;
2909
2910                task_io_accounting_add(&acct, &task->signal->ioac);
2911                while_each_thread(task, t)
2912                        task_io_accounting_add(&acct, &t->ioac);
2913
2914                unlock_task_sighand(task, &flags);
2915        }
2916        seq_printf(m,
2917                   "rchar: %llu\n"
2918                   "wchar: %llu\n"
2919                   "syscr: %llu\n"
2920                   "syscw: %llu\n"
2921                   "read_bytes: %llu\n"
2922                   "write_bytes: %llu\n"
2923                   "cancelled_write_bytes: %llu\n",
2924                   (unsigned long long)acct.rchar,
2925                   (unsigned long long)acct.wchar,
2926                   (unsigned long long)acct.syscr,
2927                   (unsigned long long)acct.syscw,
2928                   (unsigned long long)acct.read_bytes,
2929                   (unsigned long long)acct.write_bytes,
2930                   (unsigned long long)acct.cancelled_write_bytes);
2931        result = 0;
2932
2933out_unlock:
2934        mutex_unlock(&task->signal->exec_update_mutex);
2935        return result;
2936}
2937
2938static int proc_tid_io_accounting(struct seq_file *m, struct pid_namespace *ns,
2939                                  struct pid *pid, struct task_struct *task)
2940{
2941        return do_io_accounting(task, m, 0);
2942}
2943
2944static int proc_tgid_io_accounting(struct seq_file *m, struct pid_namespace *ns,
2945                                   struct pid *pid, struct task_struct *task)
2946{
2947        return do_io_accounting(task, m, 1);
2948}
2949#endif /* CONFIG_TASK_IO_ACCOUNTING */
2950
2951#ifdef CONFIG_USER_NS
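/*
 * Shared open helper for the uid_map, gid_map and projid_map files: pin
 * the target task's user namespace and hand it to the seq_file as private
 * data; proc_id_map_release() drops the reference.
 */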
2952static int proc_id_map_open(struct inode *inode, struct file *file,
2953        const struct seq_operations *seq_ops)
2954{
2955        struct user_namespace *ns = NULL;
2956        struct task_struct *task;
2957        struct seq_file *seq;
2958        int ret = -EINVAL;
2959
2960        task = get_proc_task(inode);
2961        if (task) {
2962                rcu_read_lock();
2963                ns = get_user_ns(task_cred_xxx(task, user_ns));
2964                rcu_read_unlock();
2965                put_task_struct(task);
2966        }
2967        if (!ns)
2968                goto err;
2969
2970        ret = seq_open(file, seq_ops);
2971        if (ret)
2972                goto err_put_ns;
2973
2974        seq = file->private_data;
2975        seq->private = ns;
2976
2977        return 0;
2978err_put_ns:
2979        put_user_ns(ns);
2980err:
2981        return ret;
2982}
2983
2984static int proc_id_map_release(struct inode *inode, struct file *file)
2985{
2986        struct seq_file *seq = file->private_data;
2987        struct user_namespace *ns = seq->private;
2988        put_user_ns(ns);
2989        return seq_release(inode, file);
2990}
2991
2992static int proc_uid_map_open(struct inode *inode, struct file *file)
2993{
2994        return proc_id_map_open(inode, file, &proc_uid_seq_operations);
2995}
2996
2997static int proc_gid_map_open(struct inode *inode, struct file *file)
2998{
2999        return proc_id_map_open(inode, file, &proc_gid_seq_operations);
3000}
3001
3002static int proc_projid_map_open(struct inode *inode, struct file *file)
3003{
3004        return proc_id_map_open(inode, file, &proc_projid_seq_operations);
3005}
3006
3007static const struct file_operations proc_uid_map_operations = {
3008        .open           = proc_uid_map_open,
3009        .write          = proc_uid_map_write,
3010        .read           = seq_read,
3011        .llseek         = seq_lseek,
3012        .release        = proc_id_map_release,
3013};
3014
3015static const struct file_operations proc_gid_map_operations = {
3016        .open           = proc_gid_map_open,
3017        .write          = proc_gid_map_write,
3018        .read           = seq_read,
3019        .llseek         = seq_lseek,
3020        .release        = proc_id_map_release,
3021};
3022
3023static const struct file_operations proc_projid_map_operations = {
3024        .open           = proc_projid_map_open,
3025        .write          = proc_projid_map_write,
3026        .read           = seq_read,
3027        .llseek         = seq_lseek,
3028        .release        = proc_id_map_release,
3029};
3030
3031static int proc_setgroups_open(struct inode *inode, struct file *file)
3032{
3033        struct user_namespace *ns = NULL;
3034        struct task_struct *task;
3035        int ret;
3036
3037        ret = -ESRCH;
3038        task = get_proc_task(inode);
3039        if (task) {
3040                rcu_read_lock();
3041                ns = get_user_ns(task_cred_xxx(task, user_ns));
3042                rcu_read_unlock();
3043                put_task_struct(task);
3044        }
3045        if (!ns)
3046                goto err;
3047
3048        if (file->f_mode & FMODE_WRITE) {
3049                ret = -EACCES;
3050                if (!ns_capable(ns, CAP_SYS_ADMIN))
3051                        goto err_put_ns;
3052        }
3053
3054        ret = single_open(file, &proc_setgroups_show, ns);
3055        if (ret)
3056                goto err_put_ns;
3057
3058        return 0;
3059err_put_ns:
3060        put_user_ns(ns);
3061err:
3062        return ret;
3063}
3064
3065static int proc_setgroups_release(struct inode *inode, struct file *file)
3066{
3067        struct seq_file *seq = file->private_data;
3068        struct user_namespace *ns = seq->private;
3069        int ret = single_release(inode, file);
3070        put_user_ns(ns);
3071        return ret;
3072}
3073
3074static const struct file_operations proc_setgroups_operations = {
3075        .open           = proc_setgroups_open,
3076        .write          = proc_setgroups_write,
3077        .read           = seq_read,
3078        .llseek         = seq_lseek,
3079        .release        = proc_setgroups_release,
3080};
3081#endif /* CONFIG_USER_NS */
3082
3083static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
3084                                struct pid *pid, struct task_struct *task)
3085{
3086        int err = lock_trace(task);
3087        if (!err) {
3088                seq_printf(m, "%08x\n", task->personality);
3089                unlock_trace(task);
3090        }
3091        return err;
3092}
3093
3094#ifdef CONFIG_LIVEPATCH
3095static int proc_pid_patch_state(struct seq_file *m, struct pid_namespace *ns,
3096                                struct pid *pid, struct task_struct *task)
3097{
3098        seq_printf(m, "%d\n", task->patch_state);
3099        return 0;
3100}
3101#endif /* CONFIG_LIVEPATCH */
3102
3103#ifdef CONFIG_STACKLEAK_METRICS
3104static int proc_stack_depth(struct seq_file *m, struct pid_namespace *ns,
3105                                struct pid *pid, struct task_struct *task)
3106{
3107        unsigned long prev_depth = THREAD_SIZE -
3108                                (task->prev_lowest_stack & (THREAD_SIZE - 1));
3109        unsigned long depth = THREAD_SIZE -
3110                                (task->lowest_stack & (THREAD_SIZE - 1));
3111
3112        seq_printf(m, "previous stack depth: %lu\nstack depth: %lu\n",
3113                                                        prev_depth, depth);
3114        return 0;
3115}
3116#endif /* CONFIG_STACKLEAK_METRICS */
3117
3118/*
3119 * Thread groups
3120 */
3121static const struct file_operations proc_task_operations;
3122static const struct inode_operations proc_task_inode_operations;
3123
3124static const struct pid_entry tgid_base_stuff[] = {
3125        DIR("task",       S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
3126        DIR("fd",         S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
3127        DIR("map_files",  S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations),
3128        DIR("fdinfo",     S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
3129        DIR("ns",         S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
3130#ifdef CONFIG_NET
3131        DIR("net",        S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
3132#endif
3133        REG("environ",    S_IRUSR, proc_environ_operations),
3134        REG("auxv",       S_IRUSR, proc_auxv_operations),
3135        ONE("status",     S_IRUGO, proc_pid_status),
3136        ONE("personality", S_IRUSR, proc_pid_personality),
3137        ONE("limits",     S_IRUGO, proc_pid_limits),
3138#ifdef CONFIG_SCHED_DEBUG
3139        REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
3140#endif
3141#ifdef CONFIG_SCHED_AUTOGROUP
3142        REG("autogroup",  S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
3143#endif
3144#ifdef CONFIG_TIME_NS
3145        REG("timens_offsets",  S_IRUGO|S_IWUSR, proc_timens_offsets_operations),
3146#endif
3147        REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
3148#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
3149        ONE("syscall",    S_IRUSR, proc_pid_syscall),
3150#endif
3151        REG("cmdline",    S_IRUGO, proc_pid_cmdline_ops),
3152        ONE("stat",       S_IRUGO, proc_tgid_stat),
3153        ONE("statm",      S_IRUGO, proc_pid_statm),
3154        REG("maps",       S_IRUGO, proc_pid_maps_operations),
3155#ifdef CONFIG_NUMA
3156        REG("numa_maps",  S_IRUGO, proc_pid_numa_maps_operations),
3157#endif
3158        REG("mem",        S_IRUSR|S_IWUSR, proc_mem_operations),
3159        LNK("cwd",        proc_cwd_link),
3160        LNK("root",       proc_root_link),
3161        LNK("exe",        proc_exe_link),
3162        REG("mounts",     S_IRUGO, proc_mounts_operations),
3163        REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
3164        REG("mountstats", S_IRUSR, proc_mountstats_operations),
3165#ifdef CONFIG_PROC_PAGE_MONITOR
3166        REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
3167        REG("smaps",      S_IRUGO, proc_pid_smaps_operations),
3168        REG("smaps_rollup", S_IRUGO, proc_pid_smaps_rollup_operations),
3169        REG("pagemap",    S_IRUSR, proc_pagemap_operations),
3170#endif
3171#ifdef CONFIG_SECURITY
3172        DIR("attr",       S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
3173#endif
3174#ifdef CONFIG_KALLSYMS
3175        ONE("wchan",      S_IRUGO, proc_pid_wchan),
3176#endif
3177#ifdef CONFIG_STACKTRACE
3178        ONE("stack",      S_IRUSR, proc_pid_stack),
3179#endif
3180#ifdef CONFIG_SCHED_INFO
3181        ONE("schedstat",  S_IRUGO, proc_pid_schedstat),
3182#endif
3183#ifdef CONFIG_LATENCYTOP
3184        REG("latency",  S_IRUGO, proc_lstats_operations),
3185#endif
3186#ifdef CONFIG_PROC_PID_CPUSET
3187        ONE("cpuset",     S_IRUGO, proc_cpuset_show),
3188#endif
3189#ifdef CONFIG_CGROUPS
3190        ONE("cgroup",  S_IRUGO, proc_cgroup_show),
3191#endif
3192#ifdef CONFIG_PROC_CPU_RESCTRL
3193        ONE("cpu_resctrl_groups", S_IRUGO, proc_resctrl_show),
3194#endif
3195        ONE("oom_score",  S_IRUGO, proc_oom_score),
3196        REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adj_operations),
3197        REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
3198#ifdef CONFIG_AUDIT
3199        REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
3200        REG("sessionid",  S_IRUGO, proc_sessionid_operations),
3201#endif
3202#ifdef CONFIG_FAULT_INJECTION
3203        REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
3204        REG("fail-nth", 0644, proc_fail_nth_operations),
3205#endif
3206#ifdef CONFIG_ELF_CORE
3207        REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
3208#endif
3209#ifdef CONFIG_TASK_IO_ACCOUNTING
3210        ONE("io",       S_IRUSR, proc_tgid_io_accounting),
3211#endif
3212#ifdef CONFIG_USER_NS
3213        REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
3214        REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
3215        REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
3216        REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
3217#endif
3218#if defined(CONFIG_CHECKPOINT_RESTORE) && defined(CONFIG_POSIX_TIMERS)
3219        REG("timers",     S_IRUGO, proc_timers_operations),
3220#endif
3221        REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations),
3222#ifdef CONFIG_LIVEPATCH
3223        ONE("patch_state",  S_IRUSR, proc_pid_patch_state),
3224#endif
3225#ifdef CONFIG_STACKLEAK_METRICS
3226        ONE("stack_depth", S_IRUGO, proc_stack_depth),
3227#endif
3228#ifdef CONFIG_PROC_PID_ARCH_STATUS
3229        ONE("arch_status", S_IRUGO, proc_pid_arch_status),
3230#endif
3231};
3232
3233static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
3234{
3235        return proc_pident_readdir(file, ctx,
3236                                   tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
3237}
3238
3239static const struct file_operations proc_tgid_base_operations = {
3240        .read           = generic_read_dir,
3241        .iterate_shared = proc_tgid_base_readdir,
3242        .llseek         = generic_file_llseek,
3243};
3244
3245struct pid *tgid_pidfd_to_pid(const struct file *file)
3246{
3247        if (file->f_op != &proc_tgid_base_operations)
3248                return ERR_PTR(-EBADF);
3249
3250        return proc_pid(file_inode(file));
3251}
3252
3253static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
3254{
3255        return proc_pident_lookup(dir, dentry,
3256                                  tgid_base_stuff,
3257                                  tgid_base_stuff + ARRAY_SIZE(tgid_base_stuff));
3258}
3259
3260static const struct inode_operations proc_tgid_base_inode_operations = {
3261        .lookup         = proc_tgid_base_lookup,
3262        .getattr        = pid_getattr,
3263        .setattr        = proc_setattr,
3264        .permission     = proc_pid_permission,
3265};
3266
3267/**
3268 * proc_flush_pid -  Remove dcache entries for @pid from the /proc dcache.
3269 * @pid: pid that should be flushed.
3270 *
3271 * This function walks a list of inodes (that belong to any proc
3272 * filesystem) that are attached to the pid and flushes them from
3273 * the dentry cache.
3274 *
3275 * It is safe and reasonable to cache /proc entries for a task until
3276 * that task exits.  After that they just clog up the dcache with
3277 * useless entries, possibly causing useful dcache entries to be
3278 * flushed instead.  This routine is provided to flush those useless
3279 * dcache entries when a process is reaped.
3280 *
3281 * NOTE: This routine is just an optimization so it does not guarantee
3282 *       that no dcache entries will exist after a process is reaped;
3283 *       it just makes it very unlikely that any will persist.
3284 */
3285
3286void proc_flush_pid(struct pid *pid)
3287{
3288        proc_invalidate_siblings_dcache(&pid->inodes, &pid->lock);
3289}
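
/*
 * A minimal sketch of the intended call pattern when a task is reaped
 * (illustrative only; the actual call site in the exit path is not
 * reproduced here):
 *
 *	struct pid *pid = get_pid(task_pid(task));	// hold a reference across teardown
 *	... tear the task down ...
 *	proc_flush_pid(pid);
 *	put_pid(pid);
 */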
3290
3291static struct dentry *proc_pid_instantiate(struct dentry *dentry,
3292                                   struct task_struct *task, const void *ptr)
3293{
3294        struct inode *inode;
3295
3296        inode = proc_pid_make_inode(dentry->d_sb, task, S_IFDIR | S_IRUGO | S_IXUGO);
3297        if (!inode)
3298                return ERR_PTR(-ENOENT);
3299
3300        inode->i_op = &proc_tgid_base_inode_operations;
3301        inode->i_fop = &proc_tgid_base_operations;
3302        inode->i_flags |= S_IMMUTABLE;
3303
3304        set_nlink(inode, nlink_tgid);
3305        pid_update_inode(task, inode);
3306
3307        d_set_d_op(dentry, &pid_dentry_operations);
3308        return d_splice_alias(inode, dentry);
3309}
3310
3311struct dentry *proc_pid_lookup(struct dentry *dentry, unsigned int flags)
3312{
3313        struct task_struct *task;
3314        unsigned tgid;
3315        struct pid_namespace *ns;
3316        struct dentry *result = ERR_PTR(-ENOENT);
3317
3318        tgid = name_to_int(&dentry->d_name);
3319        if (tgid == ~0U)
3320                goto out;
3321
3322        ns = dentry->d_sb->s_fs_info;
3323        rcu_read_lock();
3324        task = find_task_by_pid_ns(tgid, ns);
3325        if (task)
3326                get_task_struct(task);
3327        rcu_read_unlock();
3328        if (!task)
3329                goto out;
3330
3331        result = proc_pid_instantiate(dentry, task, NULL);
3332        put_task_struct(task);
3333out:
3334        return result;
3335}
3336
3337/*
3338 * Find the first task with a tgid >= the requested tgid.
3339 *
3340 */
3341struct tgid_iter {
3342        unsigned int tgid;
3343        struct task_struct *task;
3344};
3345static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
3346{
3347        struct pid *pid;
3348
3349        if (iter.task)
3350                put_task_struct(iter.task);
3351        rcu_read_lock();
3352retry:
3353        iter.task = NULL;
3354        pid = find_ge_pid(iter.tgid, ns);
3355        if (pid) {
3356                iter.tgid = pid_nr_ns(pid, ns);
3357                iter.task = pid_task(pid, PIDTYPE_PID);
3358                /* What we want to know is whether the pid we have
3359                 * found is the pid of a thread_group_leader.  Testing
3360                 * whether the task is a thread_group_leader is the
3361                 * obvious thing to do, but there is a window when it
3362                 * fails, due to the pid transfer logic in de_thread.
3363                 *
3364                 * So we perform the straightforward test of checking
3365                 * whether the pid we have found is the pid of a thread
3366                 * group leader, and don't worry if the task we have
3367                 * found doesn't happen to be a thread group leader,
3368                 * as that doesn't matter in the case of readdir.
3369                 */
3370                if (!iter.task || !has_group_leader_pid(iter.task)) {
3371                        iter.tgid += 1;
3372                        goto retry;
3373                }
3374                get_task_struct(iter.task);
3375        }
3376        rcu_read_unlock();
3377        return iter;
3378}
3379
3380#define TGID_OFFSET (FIRST_PROCESS_ENTRY + 2)
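
/*
 * The "+ 2" accounts for the "self" and "thread-self" links emitted at
 * positions TGID_OFFSET - 2 and TGID_OFFSET - 1 in proc_pid_readdir()
 * below; the first numeric tgid entry therefore starts at TGID_OFFSET.
 */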
3381
3382/* for the /proc/ directory itself, after non-process stuff has been done */
3383int proc_pid_readdir(struct file *file, struct dir_context *ctx)
3384{
3385        struct tgid_iter iter;
3386        struct pid_namespace *ns = proc_pid_ns(file_inode(file));
3387        loff_t pos = ctx->pos;
3388
3389        if (pos >= PID_MAX_LIMIT + TGID_OFFSET)
3390                return 0;
3391
3392        if (pos == TGID_OFFSET - 2) {
3393                struct inode *inode = d_inode(ns->proc_self);
3394                if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK))
3395                        return 0;
3396                ctx->pos = pos = pos + 1;
3397        }
3398        if (pos == TGID_OFFSET - 1) {
3399                struct inode *inode = d_inode(ns->proc_thread_self);
3400                if (!dir_emit(ctx, "thread-self", 11, inode->i_ino, DT_LNK))
3401                        return 0;
3402                ctx->pos = pos = pos + 1;
3403        }
3404        iter.tgid = pos - TGID_OFFSET;
3405        iter.task = NULL;
3406        for (iter = next_tgid(ns, iter);
3407             iter.task;
3408             iter.tgid += 1, iter = next_tgid(ns, iter)) {
3409                char name[10 + 1];
3410                unsigned int len;
3411
3412                cond_resched();
3413                if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE))
3414                        continue;
3415
3416                len = snprintf(name, sizeof(name), "%u", iter.tgid);
3417                ctx->pos = iter.tgid + TGID_OFFSET;
3418                if (!proc_fill_cache(file, ctx, name, len,
3419                                     proc_pid_instantiate, iter.task, NULL)) {
3420                        put_task_struct(iter.task);
3421                        return 0;
3422                }
3423        }
3424        ctx->pos = PID_MAX_LIMIT + TGID_OFFSET;
3425        return 0;
3426}
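
/*
 * Userspace view, as an illustrative sketch (hypothetical helper, not part
 * of this file): the numeric names emitted above are exactly what a plain
 * readdir() loop over /proc sees.
 *
 *	#include <dirent.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	static void list_tgids(void)
 *	{
 *		DIR *d = opendir("/proc");
 *		struct dirent *de;
 *
 *		if (!d)
 *			return;
 *		while ((de = readdir(d)) != NULL) {
 *			char *end;
 *			long tgid = strtol(de->d_name, &end, 10);
 *
 *			if (*end == '\0' && tgid > 0)
 *				printf("%ld\n", tgid);
 *		}
 *		closedir(d);
 *	}
 */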
3427
3428/*
3429 * proc_tid_comm_permission is a special permission function exclusively
3430 * used for the node /proc/<pid>/task/<tid>/comm.
3431 * It bypasses generic permission checks in the case where a task of the same
3432 * task group attempts to access the node.
3433 * The rationale behind this is that glibc and bionic access this node for
3434 * cross thread naming (pthread_set/getname_np(!self)). However, if
3435 * PR_SET_DUMPABLE gets set to 0, this node, among others, becomes uid=0 gid=0,
3436 * which locks out the cross thread naming implementation.
3437 * This function makes sure that the node is always accessible for members of
3438 * the same thread group.
3439 */
3440static int proc_tid_comm_permission(struct inode *inode, int mask)
3441{
3442        bool is_same_tgroup;
3443        struct task_struct *task;
3444
3445        task = get_proc_task(inode);
3446        if (!task)
3447                return -ESRCH;
3448        is_same_tgroup = same_thread_group(current, task);
3449        put_task_struct(task);
3450
3451        if (likely(is_same_tgroup && !(mask & MAY_EXEC))) {
3452                /* This file (/proc/<pid>/task/<tid>/comm) can always be
3453                 * read or written by the members of the corresponding
3454                 * thread group.
3455                 */
3456                return 0;
3457        }
3458
3459        return generic_permission(inode, mask);
3460}
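
/*
 * Userspace illustration (hypothetical helper, not part of this file): the
 * cross-thread naming path that this permission override keeps working is,
 * in essence, a write to the sibling thread's comm node:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	static int set_sibling_name(pid_t tid, const char *name)
 *	{
 *		char path[64];
 *		int fd;
 *
 *		snprintf(path, sizeof(path), "/proc/self/task/%d/comm", (int)tid);
 *		fd = open(path, O_WRONLY);
 *		if (fd < 0)
 *			return -1;
 *		write(fd, name, strlen(name));
 *		close(fd);
 *		return 0;
 *	}
 */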
3461
3462static const struct inode_operations proc_tid_comm_inode_operations = {
3463                .permission = proc_tid_comm_permission,
3464};
3465
3466/*
3467 * Tasks
3468 */
3469static const struct pid_entry tid_base_stuff[] = {
3470        DIR("fd",        S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
3471        DIR("fdinfo",    S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
3472        DIR("ns",        S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
3473#ifdef CONFIG_NET
3474        DIR("net",        S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
3475#endif
3476        REG("environ",   S_IRUSR, proc_environ_operations),
3477        REG("auxv",      S_IRUSR, proc_auxv_operations),
3478        ONE("status",    S_IRUGO, proc_pid_status),
3479        ONE("personality", S_IRUSR, proc_pid_personality),
3480        ONE("limits",    S_IRUGO, proc_pid_limits),
3481#ifdef CONFIG_SCHED_DEBUG
3482        REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
3483#endif
3484        NOD("comm",      S_IFREG|S_IRUGO|S_IWUSR,
3485                         &proc_tid_comm_inode_operations,
3486                         &proc_pid_set_comm_operations, {}),
3487#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
3488        ONE("syscall",   S_IRUSR, proc_pid_syscall),
3489#endif
3490        REG("cmdline",   S_IRUGO, proc_pid_cmdline_ops),
3491        ONE("stat",      S_IRUGO, proc_tid_stat),
3492        ONE("statm",     S_IRUGO, proc_pid_statm),
3493        REG("maps",      S_IRUGO, proc_pid_maps_operations),
3494#ifdef CONFIG_PROC_CHILDREN
3495        REG("children",  S_IRUGO, proc_tid_children_operations),
3496#endif
3497#ifdef CONFIG_NUMA
3498        REG("numa_maps", S_IRUGO, proc_pid_numa_maps_operations),
3499#endif
3500        REG("mem",       S_IRUSR|S_IWUSR, proc_mem_operations),
3501        LNK("cwd",       proc_cwd_link),
3502        LNK("root",      proc_root_link),
3503        LNK("exe",       proc_exe_link),
3504        REG("mounts",    S_IRUGO, proc_mounts_operations),
3505        REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
3506#ifdef CONFIG_PROC_PAGE_MONITOR
3507        REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
3508        REG("smaps",     S_IRUGO, proc_pid_smaps_operations),
3509        REG("smaps_rollup", S_IRUGO, proc_pid_smaps_rollup_operations),
3510        REG("pagemap",    S_IRUSR, proc_pagemap_operations),
3511#endif
3512#ifdef CONFIG_SECURITY
3513        DIR("attr",      S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
3514#endif
3515#ifdef CONFIG_KALLSYMS
3516        ONE("wchan",     S_IRUGO, proc_pid_wchan),
3517#endif
3518#ifdef CONFIG_STACKTRACE
3519        ONE("stack",      S_IRUSR, proc_pid_stack),
3520#endif
3521#ifdef CONFIG_SCHED_INFO
3522        ONE("schedstat", S_IRUGO, proc_pid_schedstat),
3523#endif
3524#ifdef CONFIG_LATENCYTOP
3525        REG("latency",  S_IRUGO, proc_lstats_operations),
3526#endif
3527#ifdef CONFIG_PROC_PID_CPUSET
3528        ONE("cpuset",    S_IRUGO, proc_cpuset_show),
3529#endif
3530#ifdef CONFIG_CGROUPS
3531        ONE("cgroup",  S_IRUGO, proc_cgroup_show),
3532#endif
3533#ifdef CONFIG_PROC_CPU_RESCTRL
3534        ONE("cpu_resctrl_groups", S_IRUGO, proc_resctrl_show),
3535#endif
3536        ONE("oom_score", S_IRUGO, proc_oom_score),
3537        REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adj_operations),
3538        REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
3539#ifdef CONFIG_AUDIT
3540        REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
3541        REG("sessionid",  S_IRUGO, proc_sessionid_operations),
3542#endif
3543#ifdef CONFIG_FAULT_INJECTION
3544        REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
3545        REG("fail-nth", 0644, proc_fail_nth_operations),
3546#endif
3547#ifdef CONFIG_TASK_IO_ACCOUNTING
3548        ONE("io",       S_IRUSR, proc_tid_io_accounting),
3549#endif
3550#ifdef CONFIG_USER_NS
3551        REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
3552        REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
3553        REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
3554        REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
3555#endif
3556#ifdef CONFIG_LIVEPATCH
3557        ONE("patch_state",  S_IRUSR, proc_pid_patch_state),
3558#endif
3559#ifdef CONFIG_PROC_PID_ARCH_STATUS
3560        ONE("arch_status", S_IRUGO, proc_pid_arch_status),
3561#endif
3562};
3563
3564static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
3565{
3566        return proc_pident_readdir(file, ctx,
3567                                   tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
3568}
3569
3570static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
3571{
3572        return proc_pident_lookup(dir, dentry,
3573                                  tid_base_stuff,
3574                                  tid_base_stuff + ARRAY_SIZE(tid_base_stuff));
3575}
3576
3577static const struct file_operations proc_tid_base_operations = {
3578        .read           = generic_read_dir,
3579        .iterate_shared = proc_tid_base_readdir,
3580        .llseek         = generic_file_llseek,
3581};
3582
3583static const struct inode_operations proc_tid_base_inode_operations = {
3584        .lookup         = proc_tid_base_lookup,
3585        .getattr        = pid_getattr,
3586        .setattr        = proc_setattr,
3587};
3588
3589static struct dentry *proc_task_instantiate(struct dentry *dentry,
3590        struct task_struct *task, const void *ptr)
3591{
3592        struct inode *inode;
3593        inode = proc_pid_make_inode(dentry->d_sb, task, S_IFDIR | S_IRUGO | S_IXUGO);
3594        if (!inode)
3595                return ERR_PTR(-ENOENT);
3596
3597        inode->i_op = &proc_tid_base_inode_operations;
3598        inode->i_fop = &proc_tid_base_operations;
3599        inode->i_flags |= S_IMMUTABLE;
3600
3601        set_nlink(inode, nlink_tid);
3602        pid_update_inode(task, inode);
3603
3604        d_set_d_op(dentry, &pid_dentry_operations);
3605        return d_splice_alias(inode, dentry);
3606}
3607
3608static struct dentry *proc_task_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
3609{
3610        struct task_struct *task;
3611        struct task_struct *leader = get_proc_task(dir);
3612        unsigned tid;
3613        struct pid_namespace *ns;
3614        struct dentry *result = ERR_PTR(-ENOENT);
3615
3616        if (!leader)
3617                goto out_no_task;
3618
3619        tid = name_to_int(&dentry->d_name);
3620        if (tid == ~0U)
3621                goto out;
3622
3623        ns = dentry->d_sb->s_fs_info;
3624        rcu_read_lock();
3625        task = find_task_by_pid_ns(tid, ns);
3626        if (task)
3627                get_task_struct(task);
3628        rcu_read_unlock();
3629        if (!task)
3630                goto out;
3631        if (!same_thread_group(leader, task))
3632                goto out_drop_task;
3633
3634        result = proc_task_instantiate(dentry, task, NULL);
3635out_drop_task:
3636        put_task_struct(task);
3637out:
3638        put_task_struct(leader);
3639out_no_task:
3640        return result;
3641}
3642
3643/*
3644 * Find the first tid of a thread group to return to user space.
3645 *
3646 * Usually this is just the thread group leader, but if the user's
3647 * buffer was too small or there was a seek into the middle of the
3648 * directory we have more work to do.
3649 *
3650 * In the case of a short read we start with find_task_by_pid_ns.
3651 *
3652 * In the case of a seek we start with the leader and walk nr
3653 * threads past it.
3654 */
3655static struct task_struct *first_tid(struct pid *pid, int tid, loff_t f_pos,
3656                                        struct pid_namespace *ns)
3657{
3658        struct task_struct *pos, *task;
3659        unsigned long nr = f_pos;
3660
3661        if (nr != f_pos)        /* 32bit overflow? */
3662                return NULL;
3663
3664        rcu_read_lock();
3665        task = pid_task(pid, PIDTYPE_PID);
3666        if (!task)
3667                goto fail;
3668
3669        /* Attempt to start with the tid of a thread */
3670        if (tid && nr) {
3671                pos = find_task_by_pid_ns(tid, ns);
3672                if (pos && same_thread_group(pos, task))
3673                        goto found;
3674        }
3675
3676        /* If nr exceeds the number of threads there is nothing to do */
3677        if (nr >= get_nr_threads(task))
3678                goto fail;
3679
3680        /* If we haven't found our starting place yet, start
3681         * with the leader and walk nr threads forward.
3682         */
3683        pos = task = task->group_leader;
3684        do {
3685                if (!nr--)
3686                        goto found;
3687        } while_each_thread(task, pos);
3688fail:
3689        pos = NULL;
3690        goto out;
3691found:
3692        get_task_struct(pos);
3693out:
3694        rcu_read_unlock();
3695        return pos;
3696}
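
/*
 * Example of the two start modes above (illustrative): with no usable cached
 * tid and nr == 3, the walk starts at the group leader and skips three
 * threads; with a tid cached from an earlier short read (and nr != 0), the
 * walk resumes directly at that thread, provided it is still in the same
 * thread group.
 */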
3697
3698/*
3699 * Find the next thread in the thread list.
3700 * Return NULL if there is an error or no next thread.
3701 *
3702 * The reference to the input task_struct is released.
3703 */
3704static struct task_struct *next_tid(struct task_struct *start)
3705{
3706        struct task_struct *pos = NULL;
3707        rcu_read_lock();
3708        if (pid_alive(start)) {
3709                pos = next_thread(start);
3710                if (thread_group_leader(pos))
3711                        pos = NULL;
3712                else
3713                        get_task_struct(pos);
3714        }
3715        rcu_read_unlock();
3716        put_task_struct(start);
3717        return pos;
3718}
3719
3720/* for the /proc/TGID/task/ directories */
3721static int proc_task_readdir(struct file *file, struct dir_context *ctx)
3722{
3723        struct inode *inode = file_inode(file);
3724        struct task_struct *task;
3725        struct pid_namespace *ns;
3726        int tid;
3727
3728        if (proc_inode_is_dead(inode))
3729                return -ENOENT;
3730
3731        if (!dir_emit_dots(file, ctx))
3732                return 0;
3733
3734        /* f_version caches the tid value that the last readdir call couldn't
3735         * return. lseek (aka telldir) automagically resets f_version to 0.
3736         */
3737        ns = proc_pid_ns(inode);
3738        tid = (int)file->f_version;
3739        file->f_version = 0;
3740        for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns);
3741             task;
3742             task = next_tid(task), ctx->pos++) {
3743                char name[10 + 1];
3744                unsigned int len;
3745                tid = task_pid_nr_ns(task, ns);
3746                len = snprintf(name, sizeof(name), "%u", tid);
3747                if (!proc_fill_cache(file, ctx, name, len,
3748                                proc_task_instantiate, task, NULL)) {
3749                        /* returning this tid failed, save it as the first
3750                         * tid for the next readdir call */
3751                        file->f_version = (u64)tid;
3752                        put_task_struct(task);
3753                        break;
3754                }
3755        }
3756
3757        return 0;
3758}
3759
3760static int proc_task_getattr(const struct path *path, struct kstat *stat,
3761                             u32 request_mask, unsigned int query_flags)
3762{
3763        struct inode *inode = d_inode(path->dentry);
3764        struct task_struct *p = get_proc_task(inode);
3765        generic_fillattr(inode, stat);
3766
3767        if (p) {
3768                stat->nlink += get_nr_threads(p);
3769                put_task_struct(p);
3770        }
3771
3772        return 0;
3773}
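
/*
 * As seen from userspace (illustrative note): stat(2) on a /proc/<pid>/task
 * directory reports a link count that grows with the number of live threads,
 * since each thread contributes one <tid> subdirectory; that is what the
 * get_nr_threads() adjustment above accounts for.
 */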
3774
3775static const struct inode_operations proc_task_inode_operations = {
3776        .lookup         = proc_task_lookup,
3777        .getattr        = proc_task_getattr,
3778        .setattr        = proc_setattr,
3779        .permission     = proc_pid_permission,
3780};
3781
3782static const struct file_operations proc_task_operations = {
3783        .read           = generic_read_dir,
3784        .iterate_shared = proc_task_readdir,
3785        .llseek         = generic_file_llseek,
3786};
3787
3788void __init set_proc_pid_nlink(void)
3789{
3790        nlink_tid = pid_entry_nlink(tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
3791        nlink_tgid = pid_entry_nlink(tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
3792}
3793