linux/fs/exec.c
   1/*
   2 *  linux/fs/exec.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 */
   6
   7/*
   8 * #!-checking implemented by tytso.
   9 */
  10/*
  11 * Demand-loading implemented 01.12.91 - no need to read anything but
  12 * the header into memory. The inode of the executable is put into
  13 * "current->executable", and page faults do the actual loading. Clean.
  14 *
  15 * Once more I can proudly say that linux stood up to being changed: it
  16 * was less than 2 hours work to get demand-loading completely implemented.
  17 *
  18 * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
  19 * current->executable is only used by the procfs.  This allows a dispatch
   20 * table to check for several different types of binary formats.  We keep
   21 * trying until we recognize the file or we run out of supported binary
   22 * formats.
  23 */
  24
  25#include <linux/slab.h>
  26#include <linux/file.h>
  27#include <linux/fdtable.h>
  28#include <linux/mm.h>
  29#include <linux/vmacache.h>
  30#include <linux/stat.h>
  31#include <linux/fcntl.h>
  32#include <linux/swap.h>
  33#include <linux/string.h>
  34#include <linux/init.h>
  35#include <linux/pagemap.h>
  36#include <linux/perf_event.h>
  37#include <linux/highmem.h>
  38#include <linux/spinlock.h>
  39#include <linux/key.h>
  40#include <linux/personality.h>
  41#include <linux/binfmts.h>
  42#include <linux/utsname.h>
  43#include <linux/pid_namespace.h>
  44#include <linux/module.h>
  45#include <linux/namei.h>
  46#include <linux/mount.h>
  47#include <linux/security.h>
  48#include <linux/syscalls.h>
  49#include <linux/tsacct_kern.h>
  50#include <linux/cn_proc.h>
  51#include <linux/audit.h>
  52#include <linux/tracehook.h>
  53#include <linux/kmod.h>
  54#include <linux/fsnotify.h>
  55#include <linux/fs_struct.h>
  56#include <linux/pipe_fs_i.h>
  57#include <linux/oom.h>
  58#include <linux/compat.h>
  59
  60#include <asm/uaccess.h>
  61#include <asm/mmu_context.h>
  62#include <asm/tlb.h>
  63
  64#include <trace/events/task.h>
  65#include "internal.h"
  66
  67#include <trace/events/sched.h>
  68
  69int suid_dumpable = 0;
  70
  71static LIST_HEAD(formats);
  72static DEFINE_RWLOCK(binfmt_lock);
  73
  74void __register_binfmt(struct linux_binfmt * fmt, int insert)
  75{
  76        BUG_ON(!fmt);
  77        if (WARN_ON(!fmt->load_binary))
  78                return;
  79        write_lock(&binfmt_lock);
  80        insert ? list_add(&fmt->lh, &formats) :
  81                 list_add_tail(&fmt->lh, &formats);
  82        write_unlock(&binfmt_lock);
  83}
  84
  85EXPORT_SYMBOL(__register_binfmt);
  86
  87void unregister_binfmt(struct linux_binfmt * fmt)
  88{
  89        write_lock(&binfmt_lock);
  90        list_del(&fmt->lh);
  91        write_unlock(&binfmt_lock);
  92}
  93
  94EXPORT_SYMBOL(unregister_binfmt);
  95
  96static inline void put_binfmt(struct linux_binfmt * fmt)
  97{
  98        module_put(fmt->module);
  99}
 100
 101#ifdef CONFIG_USELIB
 102/*
  103 * Note that a shared library must be both readable and executable for
  104 * security reasons.
  105 *
  106 * Also note that we take the address to load from the file itself.
 107 */
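     /*
      * Illustrative sketch (not part of the kernel source): uselib() is not
      * wrapped by modern libc, so a userspace caller would typically go
      * through syscall(2) directly, e.g.:
      *
      *     #include <unistd.h>
      *     #include <sys/syscall.h>
      *
      *     int err = syscall(SYS_uselib, "/lib/libfoo.so");
      *
      * The library path is hypothetical; the call only succeeds for binary
      * formats whose handlers implement load_shlib (see the loop below).
      */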
 108SYSCALL_DEFINE1(uselib, const char __user *, library)
 109{
 110        struct linux_binfmt *fmt;
 111        struct file *file;
 112        struct filename *tmp = getname(library);
 113        int error = PTR_ERR(tmp);
 114        static const struct open_flags uselib_flags = {
 115                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
 116                .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
 117                .intent = LOOKUP_OPEN,
 118                .lookup_flags = LOOKUP_FOLLOW,
 119        };
 120
 121        if (IS_ERR(tmp))
 122                goto out;
 123
 124        file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
 125        putname(tmp);
 126        error = PTR_ERR(file);
 127        if (IS_ERR(file))
 128                goto out;
 129
 130        error = -EINVAL;
 131        if (!S_ISREG(file_inode(file)->i_mode))
 132                goto exit;
 133
 134        error = -EACCES;
 135        if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
 136                goto exit;
 137
 138        fsnotify_open(file);
 139
 140        error = -ENOEXEC;
 141
 142        read_lock(&binfmt_lock);
 143        list_for_each_entry(fmt, &formats, lh) {
 144                if (!fmt->load_shlib)
 145                        continue;
 146                if (!try_module_get(fmt->module))
 147                        continue;
 148                read_unlock(&binfmt_lock);
 149                error = fmt->load_shlib(file);
 150                read_lock(&binfmt_lock);
 151                put_binfmt(fmt);
 152                if (error != -ENOEXEC)
 153                        break;
 154        }
 155        read_unlock(&binfmt_lock);
 156exit:
 157        fput(file);
 158out:
 159        return error;
 160}
 161#endif /* #ifdef CONFIG_USELIB */
 162
 163#ifdef CONFIG_MMU
 164/*
 165 * The nascent bprm->mm is not visible until exec_mmap() but it can
  166 * use a lot of memory, account these pages in current->mm temporarily
 167 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 168 * change the counter back via acct_arg_size(0).
 169 */
 170static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
 171{
 172        struct mm_struct *mm = current->mm;
 173        long diff = (long)(pages - bprm->vma_pages);
 174
 175        if (!mm || !diff)
 176                return;
 177
 178        bprm->vma_pages = pages;
 179        add_mm_counter(mm, MM_ANONPAGES, diff);
 180}
 181
 182static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 183                int write)
 184{
 185        struct page *page;
 186        int ret;
 187
 188#ifdef CONFIG_STACK_GROWSUP
 189        if (write) {
 190                ret = expand_downwards(bprm->vma, pos);
 191                if (ret < 0)
 192                        return NULL;
 193        }
 194#endif
 195        ret = get_user_pages(current, bprm->mm, pos,
 196                        1, write, 1, &page, NULL);
 197        if (ret <= 0)
 198                return NULL;
 199
 200        if (write) {
 201                unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
 202                struct rlimit *rlim;
 203
 204                acct_arg_size(bprm, size / PAGE_SIZE);
 205
 206                /*
 207                 * We've historically supported up to 32 pages (ARG_MAX)
 208                 * of argument strings even with small stacks
 209                 */
 210                if (size <= ARG_MAX)
 211                        return page;
 212
 213                /*
 214                 * Limit to 1/4-th the stack size for the argv+env strings.
 215                 * This ensures that:
 216                 *  - the remaining binfmt code will not run out of stack space,
 217                 *  - the program will have a reasonable amount of stack left
 218                 *    to work from.
 219                 */
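                     /*
                      * Example: with the common RLIMIT_STACK soft limit of
                      * 8 MiB, argv+env strings are capped at 2 MiB here.
                      */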
 220                rlim = current->signal->rlim;
 221                if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
 222                        put_page(page);
 223                        return NULL;
 224                }
 225        }
 226
 227        return page;
 228}
 229
 230static void put_arg_page(struct page *page)
 231{
 232        put_page(page);
 233}
 234
 235static void free_arg_page(struct linux_binprm *bprm, int i)
 236{
 237}
 238
 239static void free_arg_pages(struct linux_binprm *bprm)
 240{
 241}
 242
 243static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
 244                struct page *page)
 245{
 246        flush_cache_page(bprm->vma, pos, page_to_pfn(page));
 247}
 248
 249static int __bprm_mm_init(struct linux_binprm *bprm)
 250{
 251        int err;
 252        struct vm_area_struct *vma = NULL;
 253        struct mm_struct *mm = bprm->mm;
 254
 255        bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 256        if (!vma)
 257                return -ENOMEM;
 258
 259        down_write(&mm->mmap_sem);
 260        vma->vm_mm = mm;
 261
 262        /*
 263         * Place the stack at the largest stack address the architecture
 264         * supports. Later, we'll move this to an appropriate place. We don't
 265         * use STACK_TOP because that can depend on attributes which aren't
 266         * configured yet.
 267         */
 268        BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
 269        vma->vm_end = STACK_TOP_MAX;
 270        vma->vm_start = vma->vm_end - PAGE_SIZE;
 271        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
 272        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 273        INIT_LIST_HEAD(&vma->anon_vma_chain);
 274
 275        err = insert_vm_struct(mm, vma);
 276        if (err)
 277                goto err;
 278
 279        mm->stack_vm = mm->total_vm = 1;
 280        arch_bprm_mm_init(mm, vma);
 281        up_write(&mm->mmap_sem);
 282        bprm->p = vma->vm_end - sizeof(void *);
 283        return 0;
 284err:
 285        up_write(&mm->mmap_sem);
 286        bprm->vma = NULL;
 287        kmem_cache_free(vm_area_cachep, vma);
 288        return err;
 289}
 290
 291static bool valid_arg_len(struct linux_binprm *bprm, long len)
 292{
 293        return len <= MAX_ARG_STRLEN;
 294}
 295
 296#else
 297
 298static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
 299{
 300}
 301
 302static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 303                int write)
 304{
 305        struct page *page;
 306
 307        page = bprm->page[pos / PAGE_SIZE];
 308        if (!page && write) {
 309                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
 310                if (!page)
 311                        return NULL;
 312                bprm->page[pos / PAGE_SIZE] = page;
 313        }
 314
 315        return page;
 316}
 317
 318static void put_arg_page(struct page *page)
 319{
 320}
 321
 322static void free_arg_page(struct linux_binprm *bprm, int i)
 323{
 324        if (bprm->page[i]) {
 325                __free_page(bprm->page[i]);
 326                bprm->page[i] = NULL;
 327        }
 328}
 329
 330static void free_arg_pages(struct linux_binprm *bprm)
 331{
 332        int i;
 333
 334        for (i = 0; i < MAX_ARG_PAGES; i++)
 335                free_arg_page(bprm, i);
 336}
 337
 338static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
 339                struct page *page)
 340{
 341}
 342
 343static int __bprm_mm_init(struct linux_binprm *bprm)
 344{
 345        bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
 346        return 0;
 347}
 348
 349static bool valid_arg_len(struct linux_binprm *bprm, long len)
 350{
 351        return len <= bprm->p;
 352}
 353
 354#endif /* CONFIG_MMU */
 355
 356/*
 357 * Create a new mm_struct and populate it with a temporary stack
 358 * vm_area_struct.  We don't have enough context at this point to set the stack
 359 * flags, permissions, and offset, so we use temporary values.  We'll update
 360 * them later in setup_arg_pages().
 361 */
 362static int bprm_mm_init(struct linux_binprm *bprm)
 363{
 364        int err;
 365        struct mm_struct *mm = NULL;
 366
 367        bprm->mm = mm = mm_alloc();
 368        err = -ENOMEM;
 369        if (!mm)
 370                goto err;
 371
 372        err = __bprm_mm_init(bprm);
 373        if (err)
 374                goto err;
 375
 376        return 0;
 377
 378err:
 379        if (mm) {
 380                bprm->mm = NULL;
 381                mmdrop(mm);
 382        }
 383
 384        return err;
 385}
 386
 387struct user_arg_ptr {
 388#ifdef CONFIG_COMPAT
 389        bool is_compat;
 390#endif
 391        union {
 392                const char __user *const __user *native;
 393#ifdef CONFIG_COMPAT
 394                const compat_uptr_t __user *compat;
 395#endif
 396        } ptr;
 397};
 398
 399static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
 400{
 401        const char __user *native;
 402
 403#ifdef CONFIG_COMPAT
 404        if (unlikely(argv.is_compat)) {
 405                compat_uptr_t compat;
 406
 407                if (get_user(compat, argv.ptr.compat + nr))
 408                        return ERR_PTR(-EFAULT);
 409
 410                return compat_ptr(compat);
 411        }
 412#endif
 413
 414        if (get_user(native, argv.ptr.native + nr))
 415                return ERR_PTR(-EFAULT);
 416
 417        return native;
 418}
 419
 420/*
 421 * count() counts the number of strings in array ARGV.
 422 */
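     /*
      * For example, for a userspace argv of { "ls", "-l", NULL } this
      * returns 2; the terminating NULL pointer itself is not counted.
      */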
 423static int count(struct user_arg_ptr argv, int max)
 424{
 425        int i = 0;
 426
 427        if (argv.ptr.native != NULL) {
 428                for (;;) {
 429                        const char __user *p = get_user_arg_ptr(argv, i);
 430
 431                        if (!p)
 432                                break;
 433
 434                        if (IS_ERR(p))
 435                                return -EFAULT;
 436
 437                        if (i >= max)
 438                                return -E2BIG;
 439                        ++i;
 440
 441                        if (fatal_signal_pending(current))
 442                                return -ERESTARTNOHAND;
 443                        cond_resched();
 444                }
 445        }
 446        return i;
 447}
 448
 449/*
 450 * 'copy_strings()' copies argument/environment strings from the old
  451 * process's memory to the new process's stack.  The call to get_user_pages()
 452 * ensures the destination page is created and not swapped out.
 453 */
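     /*
      * Sketch of the resulting layout: strings are copied last-argument
      * first, each one backwards, growing down from bprm->p.  For
      * argv = { "ls", "-l" } the final memory at increasing addresses is
      *
      *     bprm->p -> 'l' 's' '\0' '-' 'l' '\0'
      *                  argv[0]      argv[1]
      *
      * so bprm->p ends up pointing at the start of argv[0].
      */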
 454static int copy_strings(int argc, struct user_arg_ptr argv,
 455                        struct linux_binprm *bprm)
 456{
 457        struct page *kmapped_page = NULL;
 458        char *kaddr = NULL;
 459        unsigned long kpos = 0;
 460        int ret;
 461
 462        while (argc-- > 0) {
 463                const char __user *str;
 464                int len;
 465                unsigned long pos;
 466
 467                ret = -EFAULT;
 468                str = get_user_arg_ptr(argv, argc);
 469                if (IS_ERR(str))
 470                        goto out;
 471
 472                len = strnlen_user(str, MAX_ARG_STRLEN);
 473                if (!len)
 474                        goto out;
 475
 476                ret = -E2BIG;
 477                if (!valid_arg_len(bprm, len))
 478                        goto out;
 479
  480                /* We're going to work our way backwards. */
 481                pos = bprm->p;
 482                str += len;
 483                bprm->p -= len;
 484
 485                while (len > 0) {
 486                        int offset, bytes_to_copy;
 487
 488                        if (fatal_signal_pending(current)) {
 489                                ret = -ERESTARTNOHAND;
 490                                goto out;
 491                        }
 492                        cond_resched();
 493
 494                        offset = pos % PAGE_SIZE;
 495                        if (offset == 0)
 496                                offset = PAGE_SIZE;
 497
 498                        bytes_to_copy = offset;
 499                        if (bytes_to_copy > len)
 500                                bytes_to_copy = len;
 501
 502                        offset -= bytes_to_copy;
 503                        pos -= bytes_to_copy;
 504                        str -= bytes_to_copy;
 505                        len -= bytes_to_copy;
 506
 507                        if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
 508                                struct page *page;
 509
 510                                page = get_arg_page(bprm, pos, 1);
 511                                if (!page) {
 512                                        ret = -E2BIG;
 513                                        goto out;
 514                                }
 515
 516                                if (kmapped_page) {
 517                                        flush_kernel_dcache_page(kmapped_page);
 518                                        kunmap(kmapped_page);
 519                                        put_arg_page(kmapped_page);
 520                                }
 521                                kmapped_page = page;
 522                                kaddr = kmap(kmapped_page);
 523                                kpos = pos & PAGE_MASK;
 524                                flush_arg_page(bprm, kpos, kmapped_page);
 525                        }
 526                        if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
 527                                ret = -EFAULT;
 528                                goto out;
 529                        }
 530                }
 531        }
 532        ret = 0;
 533out:
 534        if (kmapped_page) {
 535                flush_kernel_dcache_page(kmapped_page);
 536                kunmap(kmapped_page);
 537                put_arg_page(kmapped_page);
 538        }
 539        return ret;
 540}
 541
 542/*
 543 * Like copy_strings, but get argv and its values from kernel memory.
 544 */
 545int copy_strings_kernel(int argc, const char *const *__argv,
 546                        struct linux_binprm *bprm)
 547{
 548        int r;
 549        mm_segment_t oldfs = get_fs();
 550        struct user_arg_ptr argv = {
 551                .ptr.native = (const char __user *const  __user *)__argv,
 552        };
 553
 554        set_fs(KERNEL_DS);
 555        r = copy_strings(argc, argv, bprm);
 556        set_fs(oldfs);
 557
 558        return r;
 559}
 560EXPORT_SYMBOL(copy_strings_kernel);
 561
 562#ifdef CONFIG_MMU
 563
 564/*
 565 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 566 * the binfmt code determines where the new stack should reside, we shift it to
 567 * its final location.  The process proceeds as follows:
 568 *
 569 * 1) Use shift to calculate the new vma endpoints.
 570 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 571 *    arguments passed to subsequent functions are consistent.
 572 * 3) Move vma's page tables to the new range.
 573 * 4) Free up any cleared pgd range.
 574 * 5) Shrink the vma to cover only the new range.
 575 */
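     /*
      * Worked example (hypothetical addresses): with old_start = 0x7fff0000,
      * old_end = 0x80000000 and shift = 0x10000, the vma is first extended
      * to [0x7ffe0000, 0x80000000), the page tables for the 64 KiB of
      * argument pages are moved down by 0x10000, the now-unused tail is
      * freed, and the vma is finally shrunk to [0x7ffe0000, 0x7fff0000).
      */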
 576static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 577{
 578        struct mm_struct *mm = vma->vm_mm;
 579        unsigned long old_start = vma->vm_start;
 580        unsigned long old_end = vma->vm_end;
 581        unsigned long length = old_end - old_start;
 582        unsigned long new_start = old_start - shift;
 583        unsigned long new_end = old_end - shift;
 584        struct mmu_gather tlb;
 585
 586        BUG_ON(new_start > new_end);
 587
 588        /*
 589         * ensure there are no vmas between where we want to go
 590         * and where we are
 591         */
 592        if (vma != find_vma(mm, new_start))
 593                return -EFAULT;
 594
 595        /*
 596         * cover the whole range: [new_start, old_end)
 597         */
 598        if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
 599                return -ENOMEM;
 600
 601        /*
  602         * move the page tables downwards; on failure we rely on
 603         * process cleanup to remove whatever mess we made.
 604         */
 605        if (length != move_page_tables(vma, old_start,
 606                                       vma, new_start, length, false))
 607                return -ENOMEM;
 608
 609        lru_add_drain();
 610        tlb_gather_mmu(&tlb, mm, old_start, old_end);
 611        if (new_end > old_start) {
 612                /*
  613                 * when the old and new regions overlap, clear from new_end.
 614                 */
 615                free_pgd_range(&tlb, new_end, old_end, new_end,
 616                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 617        } else {
 618                /*
 619                 * otherwise, clean from old_start; this is done to not touch
  620                 * the address space in [new_end, old_start), as some architectures
  621                 * have constraints on va-space that make this illegal (IA64);
  622                 * for the others it's just a little faster.
 623                 */
 624                free_pgd_range(&tlb, old_start, old_end, new_end,
 625                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 626        }
 627        tlb_finish_mmu(&tlb, old_start, old_end);
 628
 629        /*
 630         * Shrink the vma to just the new range.  Always succeeds.
 631         */
 632        vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
 633
 634        return 0;
 635}
 636
 637/*
 638 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 639 * the stack is optionally relocated, and some extra space is added.
 640 */
 641int setup_arg_pages(struct linux_binprm *bprm,
 642                    unsigned long stack_top,
 643                    int executable_stack)
 644{
 645        unsigned long ret;
 646        unsigned long stack_shift;
 647        struct mm_struct *mm = current->mm;
 648        struct vm_area_struct *vma = bprm->vma;
 649        struct vm_area_struct *prev = NULL;
 650        unsigned long vm_flags;
 651        unsigned long stack_base;
 652        unsigned long stack_size;
 653        unsigned long stack_expand;
 654        unsigned long rlim_stack;
 655
 656#ifdef CONFIG_STACK_GROWSUP
 657        /* Limit stack size */
 658        stack_base = rlimit_max(RLIMIT_STACK);
 659        if (stack_base > STACK_SIZE_MAX)
 660                stack_base = STACK_SIZE_MAX;
 661
 662        /* Add space for stack randomization. */
 663        stack_base += (STACK_RND_MASK << PAGE_SHIFT);
 664
 665        /* Make sure we didn't let the argument array grow too large. */
 666        if (vma->vm_end - vma->vm_start > stack_base)
 667                return -ENOMEM;
 668
 669        stack_base = PAGE_ALIGN(stack_top - stack_base);
 670
 671        stack_shift = vma->vm_start - stack_base;
 672        mm->arg_start = bprm->p - stack_shift;
 673        bprm->p = vma->vm_end - stack_shift;
 674#else
 675        stack_top = arch_align_stack(stack_top);
 676        stack_top = PAGE_ALIGN(stack_top);
 677
 678        if (unlikely(stack_top < mmap_min_addr) ||
 679            unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
 680                return -ENOMEM;
 681
 682        stack_shift = vma->vm_end - stack_top;
 683
 684        bprm->p -= stack_shift;
 685        mm->arg_start = bprm->p;
 686#endif
 687
 688        if (bprm->loader)
 689                bprm->loader -= stack_shift;
 690        bprm->exec -= stack_shift;
 691
 692        down_write(&mm->mmap_sem);
 693        vm_flags = VM_STACK_FLAGS;
 694
 695        /*
 696         * Adjust stack execute permissions; explicitly enable for
 697         * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
 698         * (arch default) otherwise.
 699         */
 700        if (unlikely(executable_stack == EXSTACK_ENABLE_X))
 701                vm_flags |= VM_EXEC;
 702        else if (executable_stack == EXSTACK_DISABLE_X)
 703                vm_flags &= ~VM_EXEC;
 704        vm_flags |= mm->def_flags;
 705        vm_flags |= VM_STACK_INCOMPLETE_SETUP;
 706
 707        ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
 708                        vm_flags);
 709        if (ret)
 710                goto out_unlock;
 711        BUG_ON(prev != vma);
 712
 713        /* Move stack pages down in memory. */
 714        if (stack_shift) {
 715                ret = shift_arg_pages(vma, stack_shift);
 716                if (ret)
 717                        goto out_unlock;
 718        }
 719
 720        /* mprotect_fixup is overkill to remove the temporary stack flags */
 721        vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
 722
 723        stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
 724        stack_size = vma->vm_end - vma->vm_start;
 725        /*
 726         * Align this down to a page boundary as expand_stack
 727         * will align it up.
 728         */
 729        rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
 730#ifdef CONFIG_STACK_GROWSUP
 731        if (stack_size + stack_expand > rlim_stack)
 732                stack_base = vma->vm_start + rlim_stack;
 733        else
 734                stack_base = vma->vm_end + stack_expand;
 735#else
 736        if (stack_size + stack_expand > rlim_stack)
 737                stack_base = vma->vm_end - rlim_stack;
 738        else
 739                stack_base = vma->vm_start - stack_expand;
 740#endif
 741        current->mm->start_stack = bprm->p;
 742        ret = expand_stack(vma, stack_base);
 743        if (ret)
 744                ret = -EFAULT;
 745
 746out_unlock:
 747        up_write(&mm->mmap_sem);
 748        return ret;
 749}
 750EXPORT_SYMBOL(setup_arg_pages);
 751
 752#endif /* CONFIG_MMU */
 753
 754static struct file *do_open_execat(int fd, struct filename *name, int flags)
 755{
 756        struct file *file;
 757        int err;
 758        struct open_flags open_exec_flags = {
 759                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
 760                .acc_mode = MAY_EXEC | MAY_OPEN,
 761                .intent = LOOKUP_OPEN,
 762                .lookup_flags = LOOKUP_FOLLOW,
 763        };
 764
 765        if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
 766                return ERR_PTR(-EINVAL);
 767        if (flags & AT_SYMLINK_NOFOLLOW)
 768                open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
 769        if (flags & AT_EMPTY_PATH)
 770                open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
 771
 772        file = do_filp_open(fd, name, &open_exec_flags);
 773        if (IS_ERR(file))
 774                goto out;
 775
 776        err = -EACCES;
 777        if (!S_ISREG(file_inode(file)->i_mode))
 778                goto exit;
 779
 780        if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
 781                goto exit;
 782
 783        err = deny_write_access(file);
 784        if (err)
 785                goto exit;
 786
 787        if (name->name[0] != '\0')
 788                fsnotify_open(file);
 789
 790out:
 791        return file;
 792
 793exit:
 794        fput(file);
 795        return ERR_PTR(err);
 796}
 797
 798struct file *open_exec(const char *name)
 799{
 800        struct filename *filename = getname_kernel(name);
 801        struct file *f = ERR_CAST(filename);
 802
 803        if (!IS_ERR(filename)) {
 804                f = do_open_execat(AT_FDCWD, filename, 0);
 805                putname(filename);
 806        }
 807        return f;
 808}
 809EXPORT_SYMBOL(open_exec);
 810
 811int kernel_read(struct file *file, loff_t offset,
 812                char *addr, unsigned long count)
 813{
 814        mm_segment_t old_fs;
 815        loff_t pos = offset;
 816        int result;
 817
 818        old_fs = get_fs();
 819        set_fs(get_ds());
 820        /* The cast to a user pointer is valid due to the set_fs() */
 821        result = vfs_read(file, (void __user *)addr, count, &pos);
 822        set_fs(old_fs);
 823        return result;
 824}
 825
 826EXPORT_SYMBOL(kernel_read);
 827
 828ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
 829{
 830        ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
 831        if (res > 0)
 832                flush_icache_range(addr, addr + len);
 833        return res;
 834}
 835EXPORT_SYMBOL(read_code);
 836
 837static int exec_mmap(struct mm_struct *mm)
 838{
 839        struct task_struct *tsk;
 840        struct mm_struct *old_mm, *active_mm;
 841
 842        /* Notify parent that we're no longer interested in the old VM */
 843        tsk = current;
 844        old_mm = current->mm;
 845        mm_release(tsk, old_mm);
 846
 847        if (old_mm) {
 848                sync_mm_rss(old_mm);
 849                /*
 850                 * Make sure that if there is a core dump in progress
 851                 * for the old mm, we get out and die instead of going
 852                 * through with the exec.  We must hold mmap_sem around
 853                 * checking core_state and changing tsk->mm.
 854                 */
 855                down_read(&old_mm->mmap_sem);
 856                if (unlikely(old_mm->core_state)) {
 857                        up_read(&old_mm->mmap_sem);
 858                        return -EINTR;
 859                }
 860        }
 861        task_lock(tsk);
 862        active_mm = tsk->active_mm;
 863        tsk->mm = mm;
 864        tsk->active_mm = mm;
 865        activate_mm(active_mm, mm);
 866        tsk->mm->vmacache_seqnum = 0;
 867        vmacache_flush(tsk);
 868        task_unlock(tsk);
 869        if (old_mm) {
 870                up_read(&old_mm->mmap_sem);
 871                BUG_ON(active_mm != old_mm);
 872                setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
 873                mm_update_next_owner(old_mm);
 874                mmput(old_mm);
 875                return 0;
 876        }
 877        mmdrop(active_mm);
 878        return 0;
 879}
 880
 881/*
 882 * This function makes sure the current process has its own signal table,
 883 * so that flush_signal_handlers can later reset the handlers without
 884 * disturbing other processes.  (Other processes might share the signal
 885 * table via the CLONE_SIGHAND option to clone().)
 886 */
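     /*
      * Background sketch: a sighand table becomes shared when a task is
      * created with clone(..., CLONE_SIGHAND, ...) but without CLONE_THREAD;
      * de_thread() must unshare it so that the post-exec reset of signal
      * handlers cannot disturb such a sibling task.
      */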
 887static int de_thread(struct task_struct *tsk)
 888{
 889        struct signal_struct *sig = tsk->signal;
 890        struct sighand_struct *oldsighand = tsk->sighand;
 891        spinlock_t *lock = &oldsighand->siglock;
 892
 893        if (thread_group_empty(tsk))
 894                goto no_thread_group;
 895
 896        /*
 897         * Kill all other threads in the thread group.
 898         */
 899        spin_lock_irq(lock);
 900        if (signal_group_exit(sig)) {
 901                /*
 902                 * Another group action in progress, just
 903                 * return so that the signal is processed.
 904                 */
 905                spin_unlock_irq(lock);
 906                return -EAGAIN;
 907        }
 908
 909        sig->group_exit_task = tsk;
 910        sig->notify_count = zap_other_threads(tsk);
 911        if (!thread_group_leader(tsk))
 912                sig->notify_count--;
 913
 914        while (sig->notify_count) {
 915                __set_current_state(TASK_KILLABLE);
 916                spin_unlock_irq(lock);
 917                schedule();
 918                if (unlikely(__fatal_signal_pending(tsk)))
 919                        goto killed;
 920                spin_lock_irq(lock);
 921        }
 922        spin_unlock_irq(lock);
 923
 924        /*
 925         * At this point all other threads have exited, all we have to
 926         * do is to wait for the thread group leader to become inactive,
 927         * and to assume its PID:
 928         */
 929        if (!thread_group_leader(tsk)) {
 930                struct task_struct *leader = tsk->group_leader;
 931
 932                for (;;) {
 933                        threadgroup_change_begin(tsk);
 934                        write_lock_irq(&tasklist_lock);
 935                        /*
 936                         * Do this under tasklist_lock to ensure that
 937                         * exit_notify() can't miss ->group_exit_task
 938                         */
 939                        sig->notify_count = -1;
 940                        if (likely(leader->exit_state))
 941                                break;
 942                        __set_current_state(TASK_KILLABLE);
 943                        write_unlock_irq(&tasklist_lock);
 944                        threadgroup_change_end(tsk);
 945                        schedule();
 946                        if (unlikely(__fatal_signal_pending(tsk)))
 947                                goto killed;
 948                }
 949
 950                /*
 951                 * The only record we have of the real-time age of a
 952                 * process, regardless of execs it's done, is start_time.
 953                 * All the past CPU time is accumulated in signal_struct
 954                 * from sister threads now dead.  But in this non-leader
 955                 * exec, nothing survives from the original leader thread,
 956                 * whose birth marks the true age of this process now.
 957                 * When we take on its identity by switching to its PID, we
 958                 * also take its birthdate (always earlier than our own).
 959                 */
 960                tsk->start_time = leader->start_time;
 961                tsk->real_start_time = leader->real_start_time;
 962
 963                BUG_ON(!same_thread_group(leader, tsk));
 964                BUG_ON(has_group_leader_pid(tsk));
 965                /*
 966                 * An exec() starts a new thread group with the
 967                 * TGID of the previous thread group. Rehash the
 968                 * two threads with a switched PID, and release
 969                 * the former thread group leader:
 970                 */
 971
 972                /* Become a process group leader with the old leader's pid.
  973                 * The old leader becomes a thread of this thread group.
 974                 * Note: The old leader also uses this pid until release_task
 975                 *       is called.  Odd but simple and correct.
 976                 */
 977                tsk->pid = leader->pid;
 978                change_pid(tsk, PIDTYPE_PID, task_pid(leader));
 979                transfer_pid(leader, tsk, PIDTYPE_PGID);
 980                transfer_pid(leader, tsk, PIDTYPE_SID);
 981
 982                list_replace_rcu(&leader->tasks, &tsk->tasks);
 983                list_replace_init(&leader->sibling, &tsk->sibling);
 984
 985                tsk->group_leader = tsk;
 986                leader->group_leader = tsk;
 987
 988                tsk->exit_signal = SIGCHLD;
 989                leader->exit_signal = -1;
 990
 991                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
 992                leader->exit_state = EXIT_DEAD;
 993
 994                /*
 995                 * We are going to release_task()->ptrace_unlink() silently,
 996                 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
  997                 * the tracer won't block again waiting for this thread.
 998                 */
 999                if (unlikely(leader->ptrace))
1000                        __wake_up_parent(leader, leader->parent);
1001                write_unlock_irq(&tasklist_lock);
1002                threadgroup_change_end(tsk);
1003
1004                release_task(leader);
1005        }
1006
1007        sig->group_exit_task = NULL;
1008        sig->notify_count = 0;
1009
1010no_thread_group:
1011        /* we have changed execution domain */
1012        tsk->exit_signal = SIGCHLD;
1013
1014        exit_itimers(sig);
1015        flush_itimer_signals();
1016
1017        if (atomic_read(&oldsighand->count) != 1) {
1018                struct sighand_struct *newsighand;
1019                /*
1020                 * This ->sighand is shared with the CLONE_SIGHAND
1021                 * but not CLONE_THREAD task, switch to the new one.
1022                 */
1023                newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1024                if (!newsighand)
1025                        return -ENOMEM;
1026
1027                atomic_set(&newsighand->count, 1);
1028                memcpy(newsighand->action, oldsighand->action,
1029                       sizeof(newsighand->action));
1030
1031                write_lock_irq(&tasklist_lock);
1032                spin_lock(&oldsighand->siglock);
1033                rcu_assign_pointer(tsk->sighand, newsighand);
1034                spin_unlock(&oldsighand->siglock);
1035                write_unlock_irq(&tasklist_lock);
1036
1037                __cleanup_sighand(oldsighand);
1038        }
1039
1040        BUG_ON(!thread_group_leader(tsk));
1041        return 0;
1042
1043killed:
1044        /* protects against exit_notify() and __exit_signal() */
1045        read_lock(&tasklist_lock);
1046        sig->group_exit_task = NULL;
1047        sig->notify_count = 0;
1048        read_unlock(&tasklist_lock);
1049        return -EAGAIN;
1050}
1051
1052char *get_task_comm(char *buf, struct task_struct *tsk)
1053{
1054        /* buf must be at least sizeof(tsk->comm) in size */
1055        task_lock(tsk);
1056        strncpy(buf, tsk->comm, sizeof(tsk->comm));
1057        task_unlock(tsk);
1058        return buf;
1059}
1060EXPORT_SYMBOL_GPL(get_task_comm);
1061
1062/*
 1063 * These functions flush out all traces of the currently running executable
 1064 * so that a new one can be started.
1065 */
1066
1067void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
1068{
1069        task_lock(tsk);
1070        trace_task_rename(tsk, buf);
1071        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1072        task_unlock(tsk);
1073        perf_event_comm(tsk, exec);
1074}
1075
1076int flush_old_exec(struct linux_binprm * bprm)
1077{
1078        int retval;
1079
1080        /*
1081         * Make sure we have a private signal table and that
1082         * we are unassociated from the previous thread group.
1083         */
1084        retval = de_thread(current);
1085        if (retval)
1086                goto out;
1087
1088        /*
1089         * Must be called _before_ exec_mmap() as bprm->mm is
 1090 * not visible until then. This also enables the update
1091         * to be lockless.
1092         */
1093        set_mm_exe_file(bprm->mm, bprm->file);
1094
1095        /*
1096         * Release all of the old mmap stuff
1097         */
1098        acct_arg_size(bprm, 0);
1099        retval = exec_mmap(bprm->mm);
1100        if (retval)
1101                goto out;
1102
1103        bprm->mm = NULL;                /* We're using it now */
1104
1105        set_fs(USER_DS);
1106        current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
1107                                        PF_NOFREEZE | PF_NO_SETAFFINITY);
1108        flush_thread();
1109        current->personality &= ~bprm->per_clear;
1110
1111        return 0;
1112
1113out:
1114        return retval;
1115}
1116EXPORT_SYMBOL(flush_old_exec);
1117
1118void would_dump(struct linux_binprm *bprm, struct file *file)
1119{
1120        if (inode_permission(file_inode(file), MAY_READ) < 0)
1121                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1122}
1123EXPORT_SYMBOL(would_dump);
1124
1125void setup_new_exec(struct linux_binprm * bprm)
1126{
1127        arch_pick_mmap_layout(current->mm);
1128
1129        /* This is the point of no return */
1130        current->sas_ss_sp = current->sas_ss_size = 0;
1131
1132        if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
1133                set_dumpable(current->mm, SUID_DUMP_USER);
1134        else
1135                set_dumpable(current->mm, suid_dumpable);
1136
1137        perf_event_exec();
1138        __set_task_comm(current, kbasename(bprm->filename), true);
1139
1140        /* Set the new mm task size. We have to do that late because it may
1141         * depend on TIF_32BIT which is only updated in flush_thread() on
1142         * some architectures like powerpc
1143         */
1144        current->mm->task_size = TASK_SIZE;
1145
1146        /* install the new credentials */
1147        if (!uid_eq(bprm->cred->uid, current_euid()) ||
1148            !gid_eq(bprm->cred->gid, current_egid())) {
1149                current->pdeath_signal = 0;
1150        } else {
1151                would_dump(bprm, bprm->file);
1152                if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
1153                        set_dumpable(current->mm, suid_dumpable);
1154        }
1155
1156        /* An exec changes our domain. We are no longer part of the thread
1157           group */
1158        current->self_exec_id++;
1159        flush_signal_handlers(current, 0);
1160        do_close_on_exec(current->files);
1161}
1162EXPORT_SYMBOL(setup_new_exec);
1163
1164/*
1165 * Prepare credentials and lock ->cred_guard_mutex.
1166 * install_exec_creds() commits the new creds and drops the lock.
 1167 * Or, if exec fails before, free_bprm() should release ->cred
 1168 * and unlock.
1169 */
1170int prepare_bprm_creds(struct linux_binprm *bprm)
1171{
1172        if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
1173                return -ERESTARTNOINTR;
1174
1175        bprm->cred = prepare_exec_creds();
1176        if (likely(bprm->cred))
1177                return 0;
1178
1179        mutex_unlock(&current->signal->cred_guard_mutex);
1180        return -ENOMEM;
1181}
1182
1183static void free_bprm(struct linux_binprm *bprm)
1184{
1185        free_arg_pages(bprm);
1186        if (bprm->cred) {
1187                mutex_unlock(&current->signal->cred_guard_mutex);
1188                abort_creds(bprm->cred);
1189        }
1190        if (bprm->file) {
1191                allow_write_access(bprm->file);
1192                fput(bprm->file);
1193        }
1194        /* If a binfmt changed the interp, free it. */
1195        if (bprm->interp != bprm->filename)
1196                kfree(bprm->interp);
1197        kfree(bprm);
1198}
1199
1200int bprm_change_interp(char *interp, struct linux_binprm *bprm)
1201{
1202        /* If a binfmt changed the interp, free it first. */
1203        if (bprm->interp != bprm->filename)
1204                kfree(bprm->interp);
1205        bprm->interp = kstrdup(interp, GFP_KERNEL);
1206        if (!bprm->interp)
1207                return -ENOMEM;
1208        return 0;
1209}
1210EXPORT_SYMBOL(bprm_change_interp);
1211
1212/*
1213 * install the new credentials for this executable
1214 */
1215void install_exec_creds(struct linux_binprm *bprm)
1216{
1217        security_bprm_committing_creds(bprm);
1218
1219        commit_creds(bprm->cred);
1220        bprm->cred = NULL;
1221
1222        /*
1223         * Disable monitoring for regular users
1224         * when executing setuid binaries. Must
1225         * wait until new credentials are committed
1226         * by commit_creds() above
1227         */
1228        if (get_dumpable(current->mm) != SUID_DUMP_USER)
1229                perf_event_exit_task(current);
1230        /*
1231         * cred_guard_mutex must be held at least to this point to prevent
1232         * ptrace_attach() from altering our determination of the task's
1233         * credentials; any time after this it may be unlocked.
1234         */
1235        security_bprm_committed_creds(bprm);
1236        mutex_unlock(&current->signal->cred_guard_mutex);
1237}
1238EXPORT_SYMBOL(install_exec_creds);
1239
1240/*
1241 * determine how safe it is to execute the proposed program
1242 * - the caller must hold ->cred_guard_mutex to protect against
1243 *   PTRACE_ATTACH or seccomp thread-sync
1244 */
1245static void check_unsafe_exec(struct linux_binprm *bprm)
1246{
1247        struct task_struct *p = current, *t;
1248        unsigned n_fs;
1249
1250        if (p->ptrace) {
1251                if (p->ptrace & PT_PTRACE_CAP)
1252                        bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
1253                else
1254                        bprm->unsafe |= LSM_UNSAFE_PTRACE;
1255        }
1256
1257        /*
1258         * This isn't strictly necessary, but it makes it harder for LSMs to
1259         * mess up.
1260         */
1261        if (task_no_new_privs(current))
1262                bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
1263
1264        t = p;
1265        n_fs = 1;
1266        spin_lock(&p->fs->lock);
1267        rcu_read_lock();
1268        while_each_thread(p, t) {
1269                if (t->fs == p->fs)
1270                        n_fs++;
1271        }
1272        rcu_read_unlock();
1273
1274        if (p->fs->users > n_fs)
1275                bprm->unsafe |= LSM_UNSAFE_SHARE;
1276        else
1277                p->fs->in_exec = 1;
1278        spin_unlock(&p->fs->lock);
1279}
1280
1281static void bprm_fill_uid(struct linux_binprm *bprm)
1282{
1283        struct inode *inode;
1284        unsigned int mode;
1285        kuid_t uid;
1286        kgid_t gid;
1287
 1288        /* clear any set[ug]id data from a previous binary */
1289        bprm->cred->euid = current_euid();
1290        bprm->cred->egid = current_egid();
1291
1292        if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
1293                return;
1294
1295        if (task_no_new_privs(current))
1296                return;
1297
1298        inode = file_inode(bprm->file);
1299        mode = READ_ONCE(inode->i_mode);
1300        if (!(mode & (S_ISUID|S_ISGID)))
1301                return;
1302
1303        /* Be careful if suid/sgid is set */
1304        mutex_lock(&inode->i_mutex);
1305
 1306        /* reload mode/uid/gid atomically now that the lock is held */
1307        mode = inode->i_mode;
1308        uid = inode->i_uid;
1309        gid = inode->i_gid;
1310        mutex_unlock(&inode->i_mutex);
1311
1312        /* We ignore suid/sgid if there are no mappings for them in the ns */
1313        if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
1314                 !kgid_has_mapping(bprm->cred->user_ns, gid))
1315                return;
1316
1317        if (mode & S_ISUID) {
1318                bprm->per_clear |= PER_CLEAR_ON_SETID;
1319                bprm->cred->euid = uid;
1320        }
1321
1322        if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1323                bprm->per_clear |= PER_CLEAR_ON_SETID;
1324                bprm->cred->egid = gid;
1325        }
1326}
1327
1328/*
1329 * Fill the binprm structure from the inode.
1330 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
1331 *
1332 * This may be called multiple times for binary chains (scripts for example).
1333 */
1334int prepare_binprm(struct linux_binprm *bprm)
1335{
1336        int retval;
1337
1338        bprm_fill_uid(bprm);
1339
1340        /* fill in binprm security blob */
1341        retval = security_bprm_set_creds(bprm);
1342        if (retval)
1343                return retval;
1344        bprm->cred_prepared = 1;
1345
1346        memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1347        return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
1348}
1349
1350EXPORT_SYMBOL(prepare_binprm);
1351
1352/*
1353 * Arguments are '\0' separated strings found at the location bprm->p
 1354 * points to; chop off the first by relocating bprm->p to right after
1355 * the first '\0' encountered.
1356 */
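     /*
      * For example, when binfmt_script turns "./script.sh arg" into
      * "<interpreter> ./script.sh arg", the stale argv[0] string at bprm->p
      * is skipped here so the interpreter's own name can take its place.
      */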
1357int remove_arg_zero(struct linux_binprm *bprm)
1358{
1359        int ret = 0;
1360        unsigned long offset;
1361        char *kaddr;
1362        struct page *page;
1363
1364        if (!bprm->argc)
1365                return 0;
1366
1367        do {
1368                offset = bprm->p & ~PAGE_MASK;
1369                page = get_arg_page(bprm, bprm->p, 0);
1370                if (!page) {
1371                        ret = -EFAULT;
1372                        goto out;
1373                }
1374                kaddr = kmap_atomic(page);
1375
1376                for (; offset < PAGE_SIZE && kaddr[offset];
1377                                offset++, bprm->p++)
1378                        ;
1379
1380                kunmap_atomic(kaddr);
1381                put_arg_page(page);
1382
1383                if (offset == PAGE_SIZE)
1384                        free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
1385        } while (offset == PAGE_SIZE);
1386
1387        bprm->p++;
1388        bprm->argc--;
1389        ret = 0;
1390
1391out:
1392        return ret;
1393}
1394EXPORT_SYMBOL(remove_arg_zero);
1395
1396#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1397/*
 1398 * cycle through the list of binary format handlers until one recognizes the image
1399 */
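     /*
      * Note on the retry path below: for an unrecognized image with a
      * non-printable header, the kernel asks modprobe for a module alias
      * built from bytes 2-3 of the file.  On a little-endian machine a
      * file starting with 0x7f 'E' 'L' 'F' would map to "binfmt-464c",
      * for instance.
      */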
1400int search_binary_handler(struct linux_binprm *bprm)
1401{
1402        bool need_retry = IS_ENABLED(CONFIG_MODULES);
1403        struct linux_binfmt *fmt;
1404        int retval;
1405
1406        /* This allows 4 levels of binfmt rewrites before failing hard. */
1407        if (bprm->recursion_depth > 5)
1408                return -ELOOP;
1409
1410        retval = security_bprm_check(bprm);
1411        if (retval)
1412                return retval;
1413
1414        retval = -ENOENT;
1415 retry:
1416        read_lock(&binfmt_lock);
1417        list_for_each_entry(fmt, &formats, lh) {
1418                if (!try_module_get(fmt->module))
1419                        continue;
1420                read_unlock(&binfmt_lock);
1421                bprm->recursion_depth++;
1422                retval = fmt->load_binary(bprm);
1423                read_lock(&binfmt_lock);
1424                put_binfmt(fmt);
1425                bprm->recursion_depth--;
1426                if (retval < 0 && !bprm->mm) {
1427                        /* we got to flush_old_exec() and failed after it */
1428                        read_unlock(&binfmt_lock);
1429                        force_sigsegv(SIGSEGV, current);
1430                        return retval;
1431                }
1432                if (retval != -ENOEXEC || !bprm->file) {
1433                        read_unlock(&binfmt_lock);
1434                        return retval;
1435                }
1436        }
1437        read_unlock(&binfmt_lock);
1438
1439        if (need_retry) {
1440                if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
1441                    printable(bprm->buf[2]) && printable(bprm->buf[3]))
1442                        return retval;
1443                if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
1444                        return retval;
1445                need_retry = false;
1446                goto retry;
1447        }
1448
1449        return retval;
1450}
1451EXPORT_SYMBOL(search_binary_handler);
1452
1453static int exec_binprm(struct linux_binprm *bprm)
1454{
1455        pid_t old_pid, old_vpid;
1456        int ret;
1457
1458        /* Need to fetch pid before load_binary changes it */
1459        old_pid = current->pid;
1460        rcu_read_lock();
1461        old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
1462        rcu_read_unlock();
1463
1464        ret = search_binary_handler(bprm);
1465        if (ret >= 0) {
1466                audit_bprm(bprm);
1467                trace_sched_process_exec(current, old_pid, bprm);
1468                ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1469                proc_exec_connector(current);
1470        }
1471
1472        return ret;
1473}
1474
1475/*
1476 * sys_execve() executes a new program.
1477 */
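     /*
      * Userspace view (illustrative): everything below backs the classic
      *
      *     char *argv[] = { "/bin/ls", "-l", NULL };
      *     char *envp[] = { "PATH=/bin", NULL };
      *     execve("/bin/ls", argv, envp);
      *
      * as well as execveat(), e.g. fexecve()-style execution of an already
      * opened file descriptor via execveat(fd, "", argv, envp,
      * AT_EMPTY_PATH).
      */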
1478static int do_execveat_common(int fd, struct filename *filename,
1479                              struct user_arg_ptr argv,
1480                              struct user_arg_ptr envp,
1481                              int flags)
1482{
1483        char *pathbuf = NULL;
1484        struct linux_binprm *bprm;
1485        struct file *file;
1486        struct files_struct *displaced;
1487        int retval;
1488
1489        if (IS_ERR(filename))
1490                return PTR_ERR(filename);
1491
1492        /*
1493         * We move the actual failure in case of RLIMIT_NPROC excess from
1494         * set*uid() to execve() because too many poorly written programs
1495         * don't check setuid() return code.  Here we additionally recheck
1496         * whether NPROC limit is still exceeded.
1497         */
1498        if ((current->flags & PF_NPROC_EXCEEDED) &&
1499            atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
1500                retval = -EAGAIN;
1501                goto out_ret;
1502        }
1503
1504        /* We're below the limit (still or again), so we don't want to make
1505         * further execve() calls fail. */
1506        current->flags &= ~PF_NPROC_EXCEEDED;
1507
1508        retval = unshare_files(&displaced);
1509        if (retval)
1510                goto out_ret;
1511
1512        retval = -ENOMEM;
1513        bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1514        if (!bprm)
1515                goto out_files;
1516
1517        retval = prepare_bprm_creds(bprm);
1518        if (retval)
1519                goto out_free;
1520
1521        check_unsafe_exec(bprm);
1522        current->in_execve = 1;
1523
1524        file = do_open_execat(fd, filename, flags);
1525        retval = PTR_ERR(file);
1526        if (IS_ERR(file))
1527                goto out_unmark;
1528
1529        sched_exec();
1530
1531        bprm->file = file;
1532        if (fd == AT_FDCWD || filename->name[0] == '/') {
1533                bprm->filename = filename->name;
1534        } else {
1535                if (filename->name[0] == '\0')
1536                        pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d", fd);
1537                else
1538                        pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d/%s",
1539                                            fd, filename->name);
1540                if (!pathbuf) {
1541                        retval = -ENOMEM;
1542                        goto out_unmark;
1543                }
1544                /*
1545                 * Record that a name derived from an O_CLOEXEC fd will be
1546                 * inaccessible after exec. Relies on having exclusive access to
1547                 * current->files (due to unshare_files above).
1548                 */
1549                if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
1550                        bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
1551                bprm->filename = pathbuf;
1552        }
1553        bprm->interp = bprm->filename;
1554
1555        retval = bprm_mm_init(bprm);
1556        if (retval)
1557                goto out_unmark;
1558
1559        bprm->argc = count(argv, MAX_ARG_STRINGS);
1560        if ((retval = bprm->argc) < 0)
1561                goto out;
1562
1563        bprm->envc = count(envp, MAX_ARG_STRINGS);
1564        if ((retval = bprm->envc) < 0)
1565                goto out;
1566
1567        retval = prepare_binprm(bprm);
1568        if (retval < 0)
1569                goto out;
1570
1571        retval = copy_strings_kernel(1, &bprm->filename, bprm);
1572        if (retval < 0)
1573                goto out;
1574
1575        bprm->exec = bprm->p;
1576        retval = copy_strings(bprm->envc, envp, bprm);
1577        if (retval < 0)
1578                goto out;
1579
1580        retval = copy_strings(bprm->argc, argv, bprm);
1581        if (retval < 0)
1582                goto out;
1583
1584        retval = exec_binprm(bprm);
1585        if (retval < 0)
1586                goto out;
1587
1588        /* execve succeeded */
1589        current->fs->in_exec = 0;
1590        current->in_execve = 0;
1591        acct_update_integrals(current);
1592        task_numa_free(current);
1593        free_bprm(bprm);
1594        kfree(pathbuf);
1595        putname(filename);
1596        if (displaced)
1597                put_files_struct(displaced);
1598        return retval;
1599
1600out:
1601        if (bprm->mm) {
1602                acct_arg_size(bprm, 0);
1603                mmput(bprm->mm);
1604        }
1605
1606out_unmark:
1607        current->fs->in_exec = 0;
1608        current->in_execve = 0;
1609
1610out_free:
1611        free_bprm(bprm);
1612        kfree(pathbuf);
1613
1614out_files:
1615        if (displaced)
1616                reset_files_struct(displaced);
1617out_ret:
1618        putname(filename);
1619        return retval;
1620}
1621
1622int do_execve(struct filename *filename,
1623        const char __user *const __user *__argv,
1624        const char __user *const __user *__envp)
1625{
1626        struct user_arg_ptr argv = { .ptr.native = __argv };
1627        struct user_arg_ptr envp = { .ptr.native = __envp };
1628        return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1629}
1630
1631int do_execveat(int fd, struct filename *filename,
1632                const char __user *const __user *__argv,
1633                const char __user *const __user *__envp,
1634                int flags)
1635{
1636        struct user_arg_ptr argv = { .ptr.native = __argv };
1637        struct user_arg_ptr envp = { .ptr.native = __envp };
1638
1639        return do_execveat_common(fd, filename, argv, envp, flags);
1640}
1641
1642#ifdef CONFIG_COMPAT
1643static int compat_do_execve(struct filename *filename,
1644        const compat_uptr_t __user *__argv,
1645        const compat_uptr_t __user *__envp)
1646{
1647        struct user_arg_ptr argv = {
1648                .is_compat = true,
1649                .ptr.compat = __argv,
1650        };
1651        struct user_arg_ptr envp = {
1652                .is_compat = true,
1653                .ptr.compat = __envp,
1654        };
1655        return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1656}
1657
1658static int compat_do_execveat(int fd, struct filename *filename,
1659                              const compat_uptr_t __user *__argv,
1660                              const compat_uptr_t __user *__envp,
1661                              int flags)
1662{
1663        struct user_arg_ptr argv = {
1664                .is_compat = true,
1665                .ptr.compat = __argv,
1666        };
1667        struct user_arg_ptr envp = {
1668                .is_compat = true,
1669                .ptr.compat = __envp,
1670        };
1671        return do_execveat_common(fd, filename, argv, envp, flags);
1672}
1673#endif
1674
1675void set_binfmt(struct linux_binfmt *new)
1676{
1677        struct mm_struct *mm = current->mm;
1678
1679        if (mm->binfmt)
1680                module_put(mm->binfmt->module);
1681
1682        mm->binfmt = new;
1683        if (new)
1684                __module_get(new->module);
1685}
1686EXPORT_SYMBOL(set_binfmt);
1687
1688/*
1689 * set_dumpable stores three-value SUID_DUMP_* into mm->flags.
1690 */
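     /*
      * The cmpxchg() loop below is a standard lockless read-modify-write:
      * re-read mm->flags, splice the new two-bit value into the dumpable
      * field, and retry if another writer changed the flags meanwhile.
      */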
1691void set_dumpable(struct mm_struct *mm, int value)
1692{
1693        unsigned long old, new;
1694
1695        if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
1696                return;
1697
1698        do {
1699                old = ACCESS_ONCE(mm->flags);
1700                new = (old & ~MMF_DUMPABLE_MASK) | value;
1701        } while (cmpxchg(&mm->flags, old, new) != old);
1702}
1703
1704SYSCALL_DEFINE3(execve,
1705                const char __user *, filename,
1706                const char __user *const __user *, argv,
1707                const char __user *const __user *, envp)
1708{
1709        return do_execve(getname(filename), argv, envp);
1710}
1711
1712SYSCALL_DEFINE5(execveat,
1713                int, fd, const char __user *, filename,
1714                const char __user *const __user *, argv,
1715                const char __user *const __user *, envp,
1716                int, flags)
1717{
1718        int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
1719
1720        return do_execveat(fd,
1721                           getname_flags(filename, lookup_flags, NULL),
1722                           argv, envp, flags);
1723}
1724
1725#ifdef CONFIG_COMPAT
1726COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
1727        const compat_uptr_t __user *, argv,
1728        const compat_uptr_t __user *, envp)
1729{
1730        return compat_do_execve(getname(filename), argv, envp);
1731}
1732
1733COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
1734                       const char __user *, filename,
1735                       const compat_uptr_t __user *, argv,
1736                       const compat_uptr_t __user *, envp,
1737                       int,  flags)
1738{
1739        int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
1740
1741        return compat_do_execveat(fd,
1742                                  getname_flags(filename, lookup_flags, NULL),
1743                                  argv, envp, flags);
1744}
1745#endif
1746