   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/fs/exec.c
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 */
   7
   8/*
   9 * #!-checking implemented by tytso.
  10 */
  11/*
  12 * Demand-loading implemented 01.12.91 - no need to read anything but
  13 * the header into memory. The inode of the executable is put into
  14 * "current->executable", and page faults do the actual loading. Clean.
  15 *
  16 * Once more I can proudly say that linux stood up to being changed: it
  17 * was less than 2 hours work to get demand-loading completely implemented.
  18 *
  19 * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
  20 * current->executable is only used by the procfs.  This allows a dispatch
  21 * table to check for several different types  of binary formats.  We keep
  22 * trying until we recognize the file or we run out of supported binary
  23 * formats.
  24 */
  25
  26#include <linux/kernel_read_file.h>
  27#include <linux/slab.h>
  28#include <linux/file.h>
  29#include <linux/fdtable.h>
  30#include <linux/mm.h>
  31#include <linux/vmacache.h>
  32#include <linux/stat.h>
  33#include <linux/fcntl.h>
  34#include <linux/swap.h>
  35#include <linux/string.h>
  36#include <linux/init.h>
  37#include <linux/sched/mm.h>
  38#include <linux/sched/coredump.h>
  39#include <linux/sched/signal.h>
  40#include <linux/sched/numa_balancing.h>
  41#include <linux/sched/task.h>
  42#include <linux/pagemap.h>
  43#include <linux/perf_event.h>
  44#include <linux/highmem.h>
  45#include <linux/spinlock.h>
  46#include <linux/key.h>
  47#include <linux/personality.h>
  48#include <linux/binfmts.h>
  49#include <linux/utsname.h>
  50#include <linux/pid_namespace.h>
  51#include <linux/module.h>
  52#include <linux/namei.h>
  53#include <linux/mount.h>
  54#include <linux/security.h>
  55#include <linux/syscalls.h>
  56#include <linux/tsacct_kern.h>
  57#include <linux/cn_proc.h>
  58#include <linux/audit.h>
  59#include <linux/tracehook.h>
  60#include <linux/kmod.h>
  61#include <linux/fsnotify.h>
  62#include <linux/fs_struct.h>
  63#include <linux/oom.h>
  64#include <linux/compat.h>
  65#include <linux/vmalloc.h>
  66#include <linux/io_uring.h>
  67#include <linux/syscall_user_dispatch.h>
  68
  69#include <linux/uaccess.h>
  70#include <asm/mmu_context.h>
  71#include <asm/tlb.h>
  72
  73#include <trace/events/task.h>
  74#include "internal.h"
  75
  76#include <trace/events/sched.h>
  77
  78static int bprm_creds_from_file(struct linux_binprm *bprm);
  79
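/*
 * Dumpability applied to processes that exec set-id or otherwise
 * non-readable binaries; tunable via the fs.suid_dumpable sysctl.
 */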
  80int suid_dumpable = 0;
  81
  82static LIST_HEAD(formats);
  83static DEFINE_RWLOCK(binfmt_lock);
  84
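/*
 * Register a binary format handler.  A non-zero @insert puts the handler
 * at the head of the list so it is tried before the existing ones.
 */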
  85void __register_binfmt(struct linux_binfmt * fmt, int insert)
  86{
  87        write_lock(&binfmt_lock);
  88        insert ? list_add(&fmt->lh, &formats) :
  89                 list_add_tail(&fmt->lh, &formats);
  90        write_unlock(&binfmt_lock);
  91}
  92
  93EXPORT_SYMBOL(__register_binfmt);
  94
  95void unregister_binfmt(struct linux_binfmt * fmt)
  96{
  97        write_lock(&binfmt_lock);
  98        list_del(&fmt->lh);
  99        write_unlock(&binfmt_lock);
 100}
 101
 102EXPORT_SYMBOL(unregister_binfmt);
 103
 104static inline void put_binfmt(struct linux_binfmt * fmt)
 105{
 106        module_put(fmt->module);
 107}
 108
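/*
 * Return true if executing from this path is forbidden, either because the
 * mount is MNT_NOEXEC or the superblock is marked SB_I_NOEXEC.
 */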
 109bool path_noexec(const struct path *path)
 110{
 111        return (path->mnt->mnt_flags & MNT_NOEXEC) ||
 112               (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
 113}
 114
 115#ifdef CONFIG_USELIB
 116/*
  117 * Note that a shared library must be both readable and executable for
  118 * security reasons.
  119 *
  120 * Also note that we take the address to load from the file itself.
 121 */
 122SYSCALL_DEFINE1(uselib, const char __user *, library)
 123{
 124        struct linux_binfmt *fmt;
 125        struct file *file;
 126        struct filename *tmp = getname(library);
 127        int error = PTR_ERR(tmp);
 128        static const struct open_flags uselib_flags = {
 129                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
 130                .acc_mode = MAY_READ | MAY_EXEC,
 131                .intent = LOOKUP_OPEN,
 132                .lookup_flags = LOOKUP_FOLLOW,
 133        };
 134
 135        if (IS_ERR(tmp))
 136                goto out;
 137
 138        file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
 139        putname(tmp);
 140        error = PTR_ERR(file);
 141        if (IS_ERR(file))
 142                goto out;
 143
 144        /*
 145         * may_open() has already checked for this, so it should be
 146         * impossible to trip now. But we need to be extra cautious
 147         * and check again at the very end too.
 148         */
 149        error = -EACCES;
 150        if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
 151                         path_noexec(&file->f_path)))
 152                goto exit;
 153
 154        fsnotify_open(file);
 155
 156        error = -ENOEXEC;
 157
 158        read_lock(&binfmt_lock);
 159        list_for_each_entry(fmt, &formats, lh) {
 160                if (!fmt->load_shlib)
 161                        continue;
 162                if (!try_module_get(fmt->module))
 163                        continue;
 164                read_unlock(&binfmt_lock);
 165                error = fmt->load_shlib(file);
 166                read_lock(&binfmt_lock);
 167                put_binfmt(fmt);
 168                if (error != -ENOEXEC)
 169                        break;
 170        }
 171        read_unlock(&binfmt_lock);
 172exit:
 173        fput(file);
 174out:
 175        return error;
 176}
 177#endif /* #ifdef CONFIG_USELIB */
 178
 179#ifdef CONFIG_MMU
 180/*
 181 * The nascent bprm->mm is not visible until exec_mmap() but it can
  182 * use a lot of memory, so account these pages in current->mm temporarily
 183 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 184 * change the counter back via acct_arg_size(0).
 185 */
 186static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
 187{
 188        struct mm_struct *mm = current->mm;
 189        long diff = (long)(pages - bprm->vma_pages);
 190
 191        if (!mm || !diff)
 192                return;
 193
 194        bprm->vma_pages = pages;
 195        add_mm_counter(mm, MM_ANONPAGES, diff);
 196}
 197
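/*
 * Pin the page backing the new stack at @pos in the nascent bprm->mm,
 * faulting it in writable when @write is set.  The caller must release
 * it with put_arg_page().
 */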
 198static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 199                int write)
 200{
 201        struct page *page;
 202        int ret;
 203        unsigned int gup_flags = FOLL_FORCE;
 204
 205#ifdef CONFIG_STACK_GROWSUP
 206        if (write) {
 207                ret = expand_downwards(bprm->vma, pos);
 208                if (ret < 0)
 209                        return NULL;
 210        }
 211#endif
 212
 213        if (write)
 214                gup_flags |= FOLL_WRITE;
 215
 216        /*
 217         * We are doing an exec().  'current' is the process
 218         * doing the exec and bprm->mm is the new process's mm.
 219         */
 220        mmap_read_lock(bprm->mm);
 221        ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
 222                        &page, NULL, NULL);
 223        mmap_read_unlock(bprm->mm);
 224        if (ret <= 0)
 225                return NULL;
 226
 227        if (write)
 228                acct_arg_size(bprm, vma_pages(bprm->vma));
 229
 230        return page;
 231}
 232
 233static void put_arg_page(struct page *page)
 234{
 235        put_page(page);
 236}
 237
 238static void free_arg_pages(struct linux_binprm *bprm)
 239{
 240}
 241
 242static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
 243                struct page *page)
 244{
 245        flush_cache_page(bprm->vma, pos, page_to_pfn(page));
 246}
 247
 248static int __bprm_mm_init(struct linux_binprm *bprm)
 249{
 250        int err;
 251        struct vm_area_struct *vma = NULL;
 252        struct mm_struct *mm = bprm->mm;
 253
 254        bprm->vma = vma = vm_area_alloc(mm);
 255        if (!vma)
 256                return -ENOMEM;
 257        vma_set_anonymous(vma);
 258
 259        if (mmap_write_lock_killable(mm)) {
 260                err = -EINTR;
 261                goto err_free;
 262        }
 263
 264        /*
 265         * Place the stack at the largest stack address the architecture
 266         * supports. Later, we'll move this to an appropriate place. We don't
 267         * use STACK_TOP because that can depend on attributes which aren't
 268         * configured yet.
 269         */
 270        BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
 271        vma->vm_end = STACK_TOP_MAX;
 272        vma->vm_start = vma->vm_end - PAGE_SIZE;
 273        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
 274        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 275
 276        err = insert_vm_struct(mm, vma);
 277        if (err)
 278                goto err;
 279
 280        mm->stack_vm = mm->total_vm = 1;
 281        mmap_write_unlock(mm);
 282        bprm->p = vma->vm_end - sizeof(void *);
 283        return 0;
 284err:
 285        mmap_write_unlock(mm);
 286err_free:
 287        bprm->vma = NULL;
 288        vm_area_free(vma);
 289        return err;
 290}
 291
 292static bool valid_arg_len(struct linux_binprm *bprm, long len)
 293{
 294        return len <= MAX_ARG_STRLEN;
 295}
 296
 297#else
 298
 299static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
 300{
 301}
 302
 303static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 304                int write)
 305{
 306        struct page *page;
 307
 308        page = bprm->page[pos / PAGE_SIZE];
 309        if (!page && write) {
 310                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
 311                if (!page)
 312                        return NULL;
 313                bprm->page[pos / PAGE_SIZE] = page;
 314        }
 315
 316        return page;
 317}
 318
 319static void put_arg_page(struct page *page)
 320{
 321}
 322
 323static void free_arg_page(struct linux_binprm *bprm, int i)
 324{
 325        if (bprm->page[i]) {
 326                __free_page(bprm->page[i]);
 327                bprm->page[i] = NULL;
 328        }
 329}
 330
 331static void free_arg_pages(struct linux_binprm *bprm)
 332{
 333        int i;
 334
 335        for (i = 0; i < MAX_ARG_PAGES; i++)
 336                free_arg_page(bprm, i);
 337}
 338
 339static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
 340                struct page *page)
 341{
 342}
 343
 344static int __bprm_mm_init(struct linux_binprm *bprm)
 345{
 346        bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
 347        return 0;
 348}
 349
 350static bool valid_arg_len(struct linux_binprm *bprm, long len)
 351{
 352        return len <= bprm->p;
 353}
 354
 355#endif /* CONFIG_MMU */
 356
 357/*
 358 * Create a new mm_struct and populate it with a temporary stack
 359 * vm_area_struct.  We don't have enough context at this point to set the stack
 360 * flags, permissions, and offset, so we use temporary values.  We'll update
 361 * them later in setup_arg_pages().
 362 */
 363static int bprm_mm_init(struct linux_binprm *bprm)
 364{
 365        int err;
 366        struct mm_struct *mm = NULL;
 367
 368        bprm->mm = mm = mm_alloc();
 369        err = -ENOMEM;
 370        if (!mm)
 371                goto err;
 372
 373        /* Save current stack limit for all calculations made during exec. */
 374        task_lock(current->group_leader);
 375        bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
 376        task_unlock(current->group_leader);
 377
 378        err = __bprm_mm_init(bprm);
 379        if (err)
 380                goto err;
 381
 382        return 0;
 383
 384err:
 385        if (mm) {
 386                bprm->mm = NULL;
 387                mmdrop(mm);
 388        }
 389
 390        return err;
 391}
 392
 393struct user_arg_ptr {
 394#ifdef CONFIG_COMPAT
 395        bool is_compat;
 396#endif
 397        union {
 398                const char __user *const __user *native;
 399#ifdef CONFIG_COMPAT
 400                const compat_uptr_t __user *compat;
 401#endif
 402        } ptr;
 403};
 404
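/*
 * Fetch the @nr'th argument/environment pointer from the user-supplied
 * array, converting from a 32-bit pointer for compat tasks.
 */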
 405static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
 406{
 407        const char __user *native;
 408
 409#ifdef CONFIG_COMPAT
 410        if (unlikely(argv.is_compat)) {
 411                compat_uptr_t compat;
 412
 413                if (get_user(compat, argv.ptr.compat + nr))
 414                        return ERR_PTR(-EFAULT);
 415
 416                return compat_ptr(compat);
 417        }
 418#endif
 419
 420        if (get_user(native, argv.ptr.native + nr))
 421                return ERR_PTR(-EFAULT);
 422
 423        return native;
 424}
 425
 426/*
 427 * count() counts the number of strings in array ARGV.
 428 */
 429static int count(struct user_arg_ptr argv, int max)
 430{
 431        int i = 0;
 432
 433        if (argv.ptr.native != NULL) {
 434                for (;;) {
 435                        const char __user *p = get_user_arg_ptr(argv, i);
 436
 437                        if (!p)
 438                                break;
 439
 440                        if (IS_ERR(p))
 441                                return -EFAULT;
 442
 443                        if (i >= max)
 444                                return -E2BIG;
 445                        ++i;
 446
 447                        if (fatal_signal_pending(current))
 448                                return -ERESTARTNOHAND;
 449                        cond_resched();
 450                }
 451        }
 452        return i;
 453}
 454
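/*
 * Count the entries of a NULL-terminated kernel argv array, capped at
 * MAX_ARG_STRINGS.
 */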
 455static int count_strings_kernel(const char *const *argv)
 456{
 457        int i;
 458
 459        if (!argv)
 460                return 0;
 461
 462        for (i = 0; argv[i]; ++i) {
 463                if (i >= MAX_ARG_STRINGS)
 464                        return -E2BIG;
 465                if (fatal_signal_pending(current))
 466                        return -ERESTARTNOHAND;
 467                cond_resched();
 468        }
 469        return i;
 470}
 471
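/*
 * Work out how much of the new stack the argv/envp strings and their
 * pointers may consume, and record the resulting floor in bprm->argmin
 * so that copy_strings() can fail with -E2BIG instead of overflowing.
 */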
 472static int bprm_stack_limits(struct linux_binprm *bprm)
 473{
 474        unsigned long limit, ptr_size;
 475
 476        /*
 477         * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
 478         * (whichever is smaller) for the argv+env strings.
 479         * This ensures that:
 480         *  - the remaining binfmt code will not run out of stack space,
 481         *  - the program will have a reasonable amount of stack left
 482         *    to work from.
 483         */
 484        limit = _STK_LIM / 4 * 3;
 485        limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
 486        /*
 487         * We've historically supported up to 32 pages (ARG_MAX)
 488         * of argument strings even with small stacks
 489         */
 490        limit = max_t(unsigned long, limit, ARG_MAX);
 491        /*
 492         * We must account for the size of all the argv and envp pointers to
 493         * the argv and envp strings, since they will also take up space in
 494         * the stack. They aren't stored until much later when we can't
 495         * signal to the parent that the child has run out of stack space.
 496         * Instead, calculate it here so it's possible to fail gracefully.
 497         */
 498        ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
 499        if (limit <= ptr_size)
 500                return -E2BIG;
 501        limit -= ptr_size;
 502
 503        bprm->argmin = bprm->p - limit;
 504        return 0;
 505}
 506
 507/*
 508 * 'copy_strings()' copies argument/environment strings from the old
  509 * process's memory to the new process's stack.  The call to get_user_pages()
 510 * ensures the destination page is created and not swapped out.
 511 */
 512static int copy_strings(int argc, struct user_arg_ptr argv,
 513                        struct linux_binprm *bprm)
 514{
 515        struct page *kmapped_page = NULL;
 516        char *kaddr = NULL;
 517        unsigned long kpos = 0;
 518        int ret;
 519
 520        while (argc-- > 0) {
 521                const char __user *str;
 522                int len;
 523                unsigned long pos;
 524
 525                ret = -EFAULT;
 526                str = get_user_arg_ptr(argv, argc);
 527                if (IS_ERR(str))
 528                        goto out;
 529
 530                len = strnlen_user(str, MAX_ARG_STRLEN);
 531                if (!len)
 532                        goto out;
 533
 534                ret = -E2BIG;
 535                if (!valid_arg_len(bprm, len))
 536                        goto out;
 537
  538                /* We're going to work our way backwards. */
 539                pos = bprm->p;
 540                str += len;
 541                bprm->p -= len;
 542#ifdef CONFIG_MMU
 543                if (bprm->p < bprm->argmin)
 544                        goto out;
 545#endif
 546
 547                while (len > 0) {
 548                        int offset, bytes_to_copy;
 549
 550                        if (fatal_signal_pending(current)) {
 551                                ret = -ERESTARTNOHAND;
 552                                goto out;
 553                        }
 554                        cond_resched();
 555
 556                        offset = pos % PAGE_SIZE;
 557                        if (offset == 0)
 558                                offset = PAGE_SIZE;
 559
 560                        bytes_to_copy = offset;
 561                        if (bytes_to_copy > len)
 562                                bytes_to_copy = len;
 563
 564                        offset -= bytes_to_copy;
 565                        pos -= bytes_to_copy;
 566                        str -= bytes_to_copy;
 567                        len -= bytes_to_copy;
 568
 569                        if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
 570                                struct page *page;
 571
 572                                page = get_arg_page(bprm, pos, 1);
 573                                if (!page) {
 574                                        ret = -E2BIG;
 575                                        goto out;
 576                                }
 577
 578                                if (kmapped_page) {
 579                                        flush_dcache_page(kmapped_page);
 580                                        kunmap(kmapped_page);
 581                                        put_arg_page(kmapped_page);
 582                                }
 583                                kmapped_page = page;
 584                                kaddr = kmap(kmapped_page);
 585                                kpos = pos & PAGE_MASK;
 586                                flush_arg_page(bprm, kpos, kmapped_page);
 587                        }
 588                        if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
 589                                ret = -EFAULT;
 590                                goto out;
 591                        }
 592                }
 593        }
 594        ret = 0;
 595out:
 596        if (kmapped_page) {
 597                flush_dcache_page(kmapped_page);
 598                kunmap(kmapped_page);
 599                put_arg_page(kmapped_page);
 600        }
 601        return ret;
 602}
 603
 604/*
  605 * Copy an argument/environment string from the kernel to the process's stack.
 606 */
 607int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
 608{
 609        int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
 610        unsigned long pos = bprm->p;
 611
 612        if (len == 0)
 613                return -EFAULT;
 614        if (!valid_arg_len(bprm, len))
 615                return -E2BIG;
 616
 617        /* We're going to work our way backwards. */
 618        arg += len;
 619        bprm->p -= len;
 620        if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
 621                return -E2BIG;
 622
 623        while (len > 0) {
 624                unsigned int bytes_to_copy = min_t(unsigned int, len,
 625                                min_not_zero(offset_in_page(pos), PAGE_SIZE));
 626                struct page *page;
 627                char *kaddr;
 628
 629                pos -= bytes_to_copy;
 630                arg -= bytes_to_copy;
 631                len -= bytes_to_copy;
 632
 633                page = get_arg_page(bprm, pos, 1);
 634                if (!page)
 635                        return -E2BIG;
 636                kaddr = kmap_atomic(page);
 637                flush_arg_page(bprm, pos & PAGE_MASK, page);
 638                memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
 639                flush_dcache_page(page);
 640                kunmap_atomic(kaddr);
 641                put_arg_page(page);
 642        }
 643
 644        return 0;
 645}
 646EXPORT_SYMBOL(copy_string_kernel);
 647
 648static int copy_strings_kernel(int argc, const char *const *argv,
 649                               struct linux_binprm *bprm)
 650{
 651        while (argc-- > 0) {
 652                int ret = copy_string_kernel(argv[argc], bprm);
 653                if (ret < 0)
 654                        return ret;
 655                if (fatal_signal_pending(current))
 656                        return -ERESTARTNOHAND;
 657                cond_resched();
 658        }
 659        return 0;
 660}
 661
 662#ifdef CONFIG_MMU
 663
 664/*
 665 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 666 * the binfmt code determines where the new stack should reside, we shift it to
 667 * its final location.  The process proceeds as follows:
 668 *
 669 * 1) Use shift to calculate the new vma endpoints.
 670 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 671 *    arguments passed to subsequent functions are consistent.
 672 * 3) Move vma's page tables to the new range.
 673 * 4) Free up any cleared pgd range.
 674 * 5) Shrink the vma to cover only the new range.
 675 */
 676static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 677{
 678        struct mm_struct *mm = vma->vm_mm;
 679        unsigned long old_start = vma->vm_start;
 680        unsigned long old_end = vma->vm_end;
 681        unsigned long length = old_end - old_start;
 682        unsigned long new_start = old_start - shift;
 683        unsigned long new_end = old_end - shift;
 684        struct mmu_gather tlb;
 685
 686        BUG_ON(new_start > new_end);
 687
 688        /*
 689         * ensure there are no vmas between where we want to go
 690         * and where we are
 691         */
 692        if (vma != find_vma(mm, new_start))
 693                return -EFAULT;
 694
 695        /*
 696         * cover the whole range: [new_start, old_end)
 697         */
 698        if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
 699                return -ENOMEM;
 700
 701        /*
 702         * move the page tables downwards, on failure we rely on
 703         * process cleanup to remove whatever mess we made.
 704         */
 705        if (length != move_page_tables(vma, old_start,
 706                                       vma, new_start, length, false))
 707                return -ENOMEM;
 708
 709        lru_add_drain();
 710        tlb_gather_mmu(&tlb, mm);
 711        if (new_end > old_start) {
 712                /*
 713                 * when the old and new regions overlap clear from new_end.
 714                 */
 715                free_pgd_range(&tlb, new_end, old_end, new_end,
 716                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 717        } else {
 718                /*
 719                 * otherwise, clean from old_start; this is done to not touch
  720                 * the address space in [new_end, old_start), since some architectures
  721                 * have constraints on va-space that make this illegal (IA64) -
  722                 * for the others it's just a little faster.
 723                 */
 724                free_pgd_range(&tlb, old_start, old_end, new_end,
 725                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 726        }
 727        tlb_finish_mmu(&tlb);
 728
 729        /*
 730         * Shrink the vma to just the new range.  Always succeeds.
 731         */
 732        vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
 733
 734        return 0;
 735}
 736
 737/*
 738 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 739 * the stack is optionally relocated, and some extra space is added.
 740 */
 741int setup_arg_pages(struct linux_binprm *bprm,
 742                    unsigned long stack_top,
 743                    int executable_stack)
 744{
 745        unsigned long ret;
 746        unsigned long stack_shift;
 747        struct mm_struct *mm = current->mm;
 748        struct vm_area_struct *vma = bprm->vma;
 749        struct vm_area_struct *prev = NULL;
 750        unsigned long vm_flags;
 751        unsigned long stack_base;
 752        unsigned long stack_size;
 753        unsigned long stack_expand;
 754        unsigned long rlim_stack;
 755
 756#ifdef CONFIG_STACK_GROWSUP
 757        /* Limit stack size */
 758        stack_base = bprm->rlim_stack.rlim_max;
 759
 760        stack_base = calc_max_stack_size(stack_base);
 761
 762        /* Add space for stack randomization. */
 763        stack_base += (STACK_RND_MASK << PAGE_SHIFT);
 764
 765        /* Make sure we didn't let the argument array grow too large. */
 766        if (vma->vm_end - vma->vm_start > stack_base)
 767                return -ENOMEM;
 768
 769        stack_base = PAGE_ALIGN(stack_top - stack_base);
 770
 771        stack_shift = vma->vm_start - stack_base;
 772        mm->arg_start = bprm->p - stack_shift;
 773        bprm->p = vma->vm_end - stack_shift;
 774#else
 775        stack_top = arch_align_stack(stack_top);
 776        stack_top = PAGE_ALIGN(stack_top);
 777
 778        if (unlikely(stack_top < mmap_min_addr) ||
 779            unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
 780                return -ENOMEM;
 781
 782        stack_shift = vma->vm_end - stack_top;
 783
 784        bprm->p -= stack_shift;
 785        mm->arg_start = bprm->p;
 786#endif
 787
 788        if (bprm->loader)
 789                bprm->loader -= stack_shift;
 790        bprm->exec -= stack_shift;
 791
 792        if (mmap_write_lock_killable(mm))
 793                return -EINTR;
 794
 795        vm_flags = VM_STACK_FLAGS;
 796
 797        /*
 798         * Adjust stack execute permissions; explicitly enable for
 799         * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
 800         * (arch default) otherwise.
 801         */
 802        if (unlikely(executable_stack == EXSTACK_ENABLE_X))
 803                vm_flags |= VM_EXEC;
 804        else if (executable_stack == EXSTACK_DISABLE_X)
 805                vm_flags &= ~VM_EXEC;
 806        vm_flags |= mm->def_flags;
 807        vm_flags |= VM_STACK_INCOMPLETE_SETUP;
 808
 809        ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
 810                        vm_flags);
 811        if (ret)
 812                goto out_unlock;
 813        BUG_ON(prev != vma);
 814
 815        if (unlikely(vm_flags & VM_EXEC)) {
 816                pr_warn_once("process '%pD4' started with executable stack\n",
 817                             bprm->file);
 818        }
 819
 820        /* Move stack pages down in memory. */
 821        if (stack_shift) {
 822                ret = shift_arg_pages(vma, stack_shift);
 823                if (ret)
 824                        goto out_unlock;
 825        }
 826
 827        /* mprotect_fixup is overkill to remove the temporary stack flags */
 828        vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
 829
  830        stack_expand = 131072UL; /* arbitrarily 32*4k (or 2*64k) pages */
 831        stack_size = vma->vm_end - vma->vm_start;
 832        /*
 833         * Align this down to a page boundary as expand_stack
 834         * will align it up.
 835         */
 836        rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
 837#ifdef CONFIG_STACK_GROWSUP
 838        if (stack_size + stack_expand > rlim_stack)
 839                stack_base = vma->vm_start + rlim_stack;
 840        else
 841                stack_base = vma->vm_end + stack_expand;
 842#else
 843        if (stack_size + stack_expand > rlim_stack)
 844                stack_base = vma->vm_end - rlim_stack;
 845        else
 846                stack_base = vma->vm_start - stack_expand;
 847#endif
 848        current->mm->start_stack = bprm->p;
 849        ret = expand_stack(vma, stack_base);
 850        if (ret)
 851                ret = -EFAULT;
 852
 853out_unlock:
 854        mmap_write_unlock(mm);
 855        return ret;
 856}
 857EXPORT_SYMBOL(setup_arg_pages);
 858
 859#else
 860
 861/*
 862 * Transfer the program arguments and environment from the holding pages
 863 * onto the stack. The provided stack pointer is adjusted accordingly.
 864 */
 865int transfer_args_to_stack(struct linux_binprm *bprm,
 866                           unsigned long *sp_location)
 867{
 868        unsigned long index, stop, sp;
 869        int ret = 0;
 870
 871        stop = bprm->p >> PAGE_SHIFT;
 872        sp = *sp_location;
 873
 874        for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
 875                unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
 876                char *src = kmap(bprm->page[index]) + offset;
 877                sp -= PAGE_SIZE - offset;
 878                if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
 879                        ret = -EFAULT;
 880                kunmap(bprm->page[index]);
 881                if (ret)
 882                        goto out;
 883        }
 884
 885        *sp_location = sp;
 886
 887out:
 888        return ret;
 889}
 890EXPORT_SYMBOL(transfer_args_to_stack);
 891
 892#endif /* CONFIG_MMU */
 893
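/*
 * Open the file to be executed: read-only with __FMODE_EXEC and MAY_EXEC
 * access, with further writes denied for the lifetime of the exec.  @flags
 * may contain AT_SYMLINK_NOFOLLOW and/or AT_EMPTY_PATH.
 */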
 894static struct file *do_open_execat(int fd, struct filename *name, int flags)
 895{
 896        struct file *file;
 897        int err;
 898        struct open_flags open_exec_flags = {
 899                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
 900                .acc_mode = MAY_EXEC,
 901                .intent = LOOKUP_OPEN,
 902                .lookup_flags = LOOKUP_FOLLOW,
 903        };
 904
 905        if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
 906                return ERR_PTR(-EINVAL);
 907        if (flags & AT_SYMLINK_NOFOLLOW)
 908                open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
 909        if (flags & AT_EMPTY_PATH)
 910                open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
 911
 912        file = do_filp_open(fd, name, &open_exec_flags);
 913        if (IS_ERR(file))
 914                goto out;
 915
 916        /*
 917         * may_open() has already checked for this, so it should be
 918         * impossible to trip now. But we need to be extra cautious
 919         * and check again at the very end too.
 920         */
 921        err = -EACCES;
 922        if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
 923                         path_noexec(&file->f_path)))
 924                goto exit;
 925
 926        err = deny_write_access(file);
 927        if (err)
 928                goto exit;
 929
 930        if (name->name[0] != '\0')
 931                fsnotify_open(file);
 932
 933out:
 934        return file;
 935
 936exit:
 937        fput(file);
 938        return ERR_PTR(err);
 939}
 940
 941struct file *open_exec(const char *name)
 942{
 943        struct filename *filename = getname_kernel(name);
 944        struct file *f = ERR_CAST(filename);
 945
 946        if (!IS_ERR(filename)) {
 947                f = do_open_execat(AT_FDCWD, filename, 0);
 948                putname(filename);
 949        }
 950        return f;
 951}
 952EXPORT_SYMBOL(open_exec);
 953
 954#if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \
 955    defined(CONFIG_BINFMT_ELF_FDPIC)
 956ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
 957{
 958        ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
 959        if (res > 0)
 960                flush_icache_user_range(addr, addr + len);
 961        return res;
 962}
 963EXPORT_SYMBOL(read_code);
 964#endif
 965
 966/*
 967 * Maps the mm_struct mm into the current task struct.
 968 * On success, this function returns with exec_update_lock
 969 * held for writing.
 970 */
 971static int exec_mmap(struct mm_struct *mm)
 972{
 973        struct task_struct *tsk;
 974        struct mm_struct *old_mm, *active_mm;
 975        int ret;
 976
 977        /* Notify parent that we're no longer interested in the old VM */
 978        tsk = current;
 979        old_mm = current->mm;
 980        exec_mm_release(tsk, old_mm);
 981        if (old_mm)
 982                sync_mm_rss(old_mm);
 983
 984        ret = down_write_killable(&tsk->signal->exec_update_lock);
 985        if (ret)
 986                return ret;
 987
 988        if (old_mm) {
 989                /*
 990                 * Make sure that if there is a core dump in progress
 991                 * for the old mm, we get out and die instead of going
 992                 * through with the exec.  We must hold mmap_lock around
 993                 * checking core_state and changing tsk->mm.
 994                 */
 995                mmap_read_lock(old_mm);
 996                if (unlikely(old_mm->core_state)) {
 997                        mmap_read_unlock(old_mm);
 998                        up_write(&tsk->signal->exec_update_lock);
 999                        return -EINTR;
1000                }
1001        }
1002
1003        task_lock(tsk);
1004        membarrier_exec_mmap(mm);
1005
1006        local_irq_disable();
1007        active_mm = tsk->active_mm;
1008        tsk->active_mm = mm;
1009        tsk->mm = mm;
1010        /*
1011         * This prevents preemption while active_mm is being loaded and
1012         * it and mm are being updated, which could cause problems for
1013         * lazy tlb mm refcounting when these are updated by context
1014         * switches. Not all architectures can handle irqs off over
1015         * activate_mm yet.
1016         */
1017        if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
1018                local_irq_enable();
1019        activate_mm(active_mm, mm);
1020        if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
1021                local_irq_enable();
1022        tsk->mm->vmacache_seqnum = 0;
1023        vmacache_flush(tsk);
1024        task_unlock(tsk);
1025        if (old_mm) {
1026                mmap_read_unlock(old_mm);
1027                BUG_ON(active_mm != old_mm);
1028                setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
1029                mm_update_next_owner(old_mm);
1030                mmput(old_mm);
1031                return 0;
1032        }
1033        mmdrop(active_mm);
1034        return 0;
1035}
1036
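/*
 * Make the calling task the only thread in its thread group: kill every
 * other thread, wait for them to exit and, if the caller was not the
 * group leader, take over the old leader's PID and start time.  Returns
 * -EAGAIN if another group exit is in progress or a fatal signal arrives.
 */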
1037static int de_thread(struct task_struct *tsk)
1038{
1039        struct signal_struct *sig = tsk->signal;
1040        struct sighand_struct *oldsighand = tsk->sighand;
1041        spinlock_t *lock = &oldsighand->siglock;
1042
1043        if (thread_group_empty(tsk))
1044                goto no_thread_group;
1045
1046        /*
1047         * Kill all other threads in the thread group.
1048         */
1049        spin_lock_irq(lock);
1050        if (signal_group_exit(sig)) {
1051                /*
1052                 * Another group action in progress, just
1053                 * return so that the signal is processed.
1054                 */
1055                spin_unlock_irq(lock);
1056                return -EAGAIN;
1057        }
1058
1059        sig->group_exit_task = tsk;
1060        sig->notify_count = zap_other_threads(tsk);
1061        if (!thread_group_leader(tsk))
1062                sig->notify_count--;
1063
1064        while (sig->notify_count) {
1065                __set_current_state(TASK_KILLABLE);
1066                spin_unlock_irq(lock);
1067                schedule();
1068                if (__fatal_signal_pending(tsk))
1069                        goto killed;
1070                spin_lock_irq(lock);
1071        }
1072        spin_unlock_irq(lock);
1073
1074        /*
1075         * At this point all other threads have exited, all we have to
1076         * do is to wait for the thread group leader to become inactive,
1077         * and to assume its PID:
1078         */
1079        if (!thread_group_leader(tsk)) {
1080                struct task_struct *leader = tsk->group_leader;
1081
1082                for (;;) {
1083                        cgroup_threadgroup_change_begin(tsk);
1084                        write_lock_irq(&tasklist_lock);
1085                        /*
1086                         * Do this under tasklist_lock to ensure that
1087                         * exit_notify() can't miss ->group_exit_task
1088                         */
1089                        sig->notify_count = -1;
1090                        if (likely(leader->exit_state))
1091                                break;
1092                        __set_current_state(TASK_KILLABLE);
1093                        write_unlock_irq(&tasklist_lock);
1094                        cgroup_threadgroup_change_end(tsk);
1095                        schedule();
1096                        if (__fatal_signal_pending(tsk))
1097                                goto killed;
1098                }
1099
1100                /*
1101                 * The only record we have of the real-time age of a
1102                 * process, regardless of execs it's done, is start_time.
1103                 * All the past CPU time is accumulated in signal_struct
1104                 * from sister threads now dead.  But in this non-leader
1105                 * exec, nothing survives from the original leader thread,
1106                 * whose birth marks the true age of this process now.
1107                 * When we take on its identity by switching to its PID, we
1108                 * also take its birthdate (always earlier than our own).
1109                 */
1110                tsk->start_time = leader->start_time;
1111                tsk->start_boottime = leader->start_boottime;
1112
1113                BUG_ON(!same_thread_group(leader, tsk));
1114                /*
1115                 * An exec() starts a new thread group with the
1116                 * TGID of the previous thread group. Rehash the
1117                 * two threads with a switched PID, and release
1118                 * the former thread group leader:
1119                 */
1120
1121                /* Become a process group leader with the old leader's pid.
 1122                 * The old leader becomes a thread of this thread group.
1123                 */
1124                exchange_tids(tsk, leader);
1125                transfer_pid(leader, tsk, PIDTYPE_TGID);
1126                transfer_pid(leader, tsk, PIDTYPE_PGID);
1127                transfer_pid(leader, tsk, PIDTYPE_SID);
1128
1129                list_replace_rcu(&leader->tasks, &tsk->tasks);
1130                list_replace_init(&leader->sibling, &tsk->sibling);
1131
1132                tsk->group_leader = tsk;
1133                leader->group_leader = tsk;
1134
1135                tsk->exit_signal = SIGCHLD;
1136                leader->exit_signal = -1;
1137
1138                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
1139                leader->exit_state = EXIT_DEAD;
1140
1141                /*
1142                 * We are going to release_task()->ptrace_unlink() silently,
1143                 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
 1144                 * the tracer won't block again waiting for this thread.
1145                 */
1146                if (unlikely(leader->ptrace))
1147                        __wake_up_parent(leader, leader->parent);
1148                write_unlock_irq(&tasklist_lock);
1149                cgroup_threadgroup_change_end(tsk);
1150
1151                release_task(leader);
1152        }
1153
1154        sig->group_exit_task = NULL;
1155        sig->notify_count = 0;
1156
1157no_thread_group:
1158        /* we have changed execution domain */
1159        tsk->exit_signal = SIGCHLD;
1160
1161        BUG_ON(!thread_group_leader(tsk));
1162        return 0;
1163
1164killed:
1165        /* protects against exit_notify() and __exit_signal() */
1166        read_lock(&tasklist_lock);
1167        sig->group_exit_task = NULL;
1168        sig->notify_count = 0;
1169        read_unlock(&tasklist_lock);
1170        return -EAGAIN;
1171}
1172
1173
1174/*
1175 * This function makes sure the current process has its own signal table,
1176 * so that flush_signal_handlers can later reset the handlers without
1177 * disturbing other processes.  (Other processes might share the signal
1178 * table via the CLONE_SIGHAND option to clone().)
1179 */
1180static int unshare_sighand(struct task_struct *me)
1181{
1182        struct sighand_struct *oldsighand = me->sighand;
1183
1184        if (refcount_read(&oldsighand->count) != 1) {
1185                struct sighand_struct *newsighand;
1186                /*
1187                 * This ->sighand is shared with the CLONE_SIGHAND
1188                 * but not CLONE_THREAD task, switch to the new one.
1189                 */
1190                newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1191                if (!newsighand)
1192                        return -ENOMEM;
1193
1194                refcount_set(&newsighand->count, 1);
1195                memcpy(newsighand->action, oldsighand->action,
1196                       sizeof(newsighand->action));
1197
1198                write_lock_irq(&tasklist_lock);
1199                spin_lock(&oldsighand->siglock);
1200                rcu_assign_pointer(me->sighand, newsighand);
1201                spin_unlock(&oldsighand->siglock);
1202                write_unlock_irq(&tasklist_lock);
1203
1204                __cleanup_sighand(oldsighand);
1205        }
1206        return 0;
1207}
1208
1209char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
1210{
1211        task_lock(tsk);
1212        strncpy(buf, tsk->comm, buf_size);
1213        task_unlock(tsk);
1214        return buf;
1215}
1216EXPORT_SYMBOL_GPL(__get_task_comm);
1217
1218/*
 1219 * These functions flush out all traces of the currently running executable
 1220 * so that a new one can be started.
1221 */
1222
1223void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
1224{
1225        task_lock(tsk);
1226        trace_task_rename(tsk, buf);
1227        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1228        task_unlock(tsk);
1229        perf_event_comm(tsk, exec);
1230}
1231
1232/*
1233 * Calling this is the point of no return. None of the failures will be
1234 * seen by userspace since either the process is already taking a fatal
1235 * signal (via de_thread() or coredump), or will have SEGV raised
1236 * (after exec_mmap()) by search_binary_handler (see below).
1237 */
1238int begin_new_exec(struct linux_binprm * bprm)
1239{
1240        struct task_struct *me = current;
1241        int retval;
1242
1243        /* Once we are committed compute the creds */
1244        retval = bprm_creds_from_file(bprm);
1245        if (retval)
1246                return retval;
1247
1248        /*
1249         * Ensure all future errors are fatal.
1250         */
1251        bprm->point_of_no_return = true;
1252
1253        /*
1254         * Make this the only thread in the thread group.
1255         */
1256        retval = de_thread(me);
1257        if (retval)
1258                goto out;
1259
1260        /*
1261         * Cancel any io_uring activity across execve
1262         */
1263        io_uring_task_cancel();
1264
1265        /* Ensure the files table is not shared. */
1266        retval = unshare_files();
1267        if (retval)
1268                goto out;
1269
1270        /*
1271         * Must be called _before_ exec_mmap() as bprm->mm is
 1272         * not visible until then. This also enables the update
1273         * to be lockless.
1274         */
1275        retval = set_mm_exe_file(bprm->mm, bprm->file);
1276        if (retval)
1277                goto out;
1278
1279        /* If the binary is not readable then enforce mm->dumpable=0 */
1280        would_dump(bprm, bprm->file);
1281        if (bprm->have_execfd)
1282                would_dump(bprm, bprm->executable);
1283
1284        /*
1285         * Release all of the old mmap stuff
1286         */
1287        acct_arg_size(bprm, 0);
1288        retval = exec_mmap(bprm->mm);
1289        if (retval)
1290                goto out;
1291
1292        bprm->mm = NULL;
1293
1294#ifdef CONFIG_POSIX_TIMERS
1295        exit_itimers(me->signal);
1296        flush_itimer_signals();
1297#endif
1298
1299        /*
1300         * Make the signal table private.
1301         */
1302        retval = unshare_sighand(me);
1303        if (retval)
1304                goto out_unlock;
1305
1306        /*
1307         * Ensure that the uaccess routines can actually operate on userspace
1308         * pointers:
1309         */
1310        force_uaccess_begin();
1311
1312        me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
1313                                        PF_NOFREEZE | PF_NO_SETAFFINITY);
1314        flush_thread();
1315        me->personality &= ~bprm->per_clear;
1316
1317        clear_syscall_work_syscall_user_dispatch(me);
1318
1319        /*
1320         * We have to apply CLOEXEC before we change whether the process is
1321         * dumpable (in setup_new_exec) to avoid a race with a process in userspace
1322         * trying to access the should-be-closed file descriptors of a process
1323         * undergoing exec(2).
1324         */
1325        do_close_on_exec(me->files);
1326
1327        if (bprm->secureexec) {
1328                /* Make sure parent cannot signal privileged process. */
1329                me->pdeath_signal = 0;
1330
1331                /*
1332                 * For secureexec, reset the stack limit to sane default to
1333                 * avoid bad behavior from the prior rlimits. This has to
1334                 * happen before arch_pick_mmap_layout(), which examines
1335                 * RLIMIT_STACK, but after the point of no return to avoid
1336                 * needing to clean up the change on failure.
1337                 */
1338                if (bprm->rlim_stack.rlim_cur > _STK_LIM)
1339                        bprm->rlim_stack.rlim_cur = _STK_LIM;
1340        }
1341
1342        me->sas_ss_sp = me->sas_ss_size = 0;
1343
1344        /*
 1345         * Figure out dumpability. Note that checking only current here
1346         * is wrong, but userspace depends on it. This should be testing
1347         * bprm->secureexec instead.
1348         */
1349        if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
1350            !(uid_eq(current_euid(), current_uid()) &&
1351              gid_eq(current_egid(), current_gid())))
1352                set_dumpable(current->mm, suid_dumpable);
1353        else
1354                set_dumpable(current->mm, SUID_DUMP_USER);
1355
1356        perf_event_exec();
1357        __set_task_comm(me, kbasename(bprm->filename), true);
1358
1359        /* An exec changes our domain. We are no longer part of the thread
1360           group */
1361        WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
1362        flush_signal_handlers(me, 0);
1363
1364        retval = set_cred_ucounts(bprm->cred);
1365        if (retval < 0)
1366                goto out_unlock;
1367
1368        /*
1369         * install the new credentials for this executable
1370         */
1371        security_bprm_committing_creds(bprm);
1372
1373        commit_creds(bprm->cred);
1374        bprm->cred = NULL;
1375
1376        /*
1377         * Disable monitoring for regular users
1378         * when executing setuid binaries. Must
1379         * wait until new credentials are committed
1380         * by commit_creds() above
1381         */
1382        if (get_dumpable(me->mm) != SUID_DUMP_USER)
1383                perf_event_exit_task(me);
1384        /*
1385         * cred_guard_mutex must be held at least to this point to prevent
1386         * ptrace_attach() from altering our determination of the task's
1387         * credentials; any time after this it may be unlocked.
1388         */
1389        security_bprm_committed_creds(bprm);
1390
1391        /* Pass the opened binary to the interpreter. */
1392        if (bprm->have_execfd) {
1393                retval = get_unused_fd_flags(0);
1394                if (retval < 0)
1395                        goto out_unlock;
1396                fd_install(retval, bprm->executable);
1397                bprm->executable = NULL;
1398                bprm->execfd = retval;
1399        }
1400        return 0;
1401
1402out_unlock:
1403        up_write(&me->signal->exec_update_lock);
1404out:
1405        return retval;
1406}
1407EXPORT_SYMBOL(begin_new_exec);
1408
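/*
 * If the user cannot read the binary, flag the bprm so the new process is
 * made non-dumpable and walk mm->user_ns up to a namespace that is
 * privileged over the file's inode.
 */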
1409void would_dump(struct linux_binprm *bprm, struct file *file)
1410{
1411        struct inode *inode = file_inode(file);
1412        struct user_namespace *mnt_userns = file_mnt_user_ns(file);
1413        if (inode_permission(mnt_userns, inode, MAY_READ) < 0) {
1414                struct user_namespace *old, *user_ns;
1415                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1416
1417                /* Ensure mm->user_ns contains the executable */
1418                user_ns = old = bprm->mm->user_ns;
1419                while ((user_ns != &init_user_ns) &&
1420                       !privileged_wrt_inode_uidgid(user_ns, mnt_userns, inode))
1421                        user_ns = user_ns->parent;
1422
1423                if (old != user_ns) {
1424                        bprm->mm->user_ns = get_user_ns(user_ns);
1425                        put_user_ns(old);
1426                }
1427        }
1428}
1429EXPORT_SYMBOL(would_dump);
1430
1431void setup_new_exec(struct linux_binprm * bprm)
1432{
1433        /* Setup things that can depend upon the personality */
1434        struct task_struct *me = current;
1435
1436        arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);
1437
1438        arch_setup_new_exec();
1439
1440        /* Set the new mm task size. We have to do that late because it may
1441         * depend on TIF_32BIT which is only updated in flush_thread() on
1442         * some architectures like powerpc
1443         */
1444        me->mm->task_size = TASK_SIZE;
1445        up_write(&me->signal->exec_update_lock);
1446        mutex_unlock(&me->signal->cred_guard_mutex);
1447}
1448EXPORT_SYMBOL(setup_new_exec);
1449
1450/* Runs immediately before start_thread() takes over. */
1451void finalize_exec(struct linux_binprm *bprm)
1452{
1453        /* Store any stack rlimit changes before starting thread. */
1454        task_lock(current->group_leader);
1455        current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
1456        task_unlock(current->group_leader);
1457}
1458EXPORT_SYMBOL(finalize_exec);
1459
1460/*
1461 * Prepare credentials and lock ->cred_guard_mutex.
1462 * setup_new_exec() commits the new creds and drops the lock.
1463 * Or, if exec fails before, free_bprm() should release ->cred
1464 * and unlock.
1465 */
1466static int prepare_bprm_creds(struct linux_binprm *bprm)
1467{
1468        if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
1469                return -ERESTARTNOINTR;
1470
1471        bprm->cred = prepare_exec_creds();
1472        if (likely(bprm->cred))
1473                return 0;
1474
1475        mutex_unlock(&current->signal->cred_guard_mutex);
1476        return -ENOMEM;
1477}
1478
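/*
 * Tear down a linux_binprm: drop the temporary mm and argument pages,
 * release the cred_guard_mutex and unused credentials if they are still
 * held, and put the files and strings referenced by the bprm.
 */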
1479static void free_bprm(struct linux_binprm *bprm)
1480{
1481        if (bprm->mm) {
1482                acct_arg_size(bprm, 0);
1483                mmput(bprm->mm);
1484        }
1485        free_arg_pages(bprm);
1486        if (bprm->cred) {
1487                mutex_unlock(&current->signal->cred_guard_mutex);
1488                abort_creds(bprm->cred);
1489        }
1490        if (bprm->file) {
1491                allow_write_access(bprm->file);
1492                fput(bprm->file);
1493        }
1494        if (bprm->executable)
1495                fput(bprm->executable);
1496        /* If a binfmt changed the interp, free it. */
1497        if (bprm->interp != bprm->filename)
1498                kfree(bprm->interp);
1499        kfree(bprm->fdpath);
1500        kfree(bprm);
1501}
1502
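/*
 * Allocate a linux_binprm and fill in the basics: ->filename (built as a
 * /dev/fd/ path when executing by file descriptor), ->interp, and the
 * temporary mm via bprm_mm_init().
 */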
1503static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
1504{
1505        struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1506        int retval = -ENOMEM;
1507        if (!bprm)
1508                goto out;
1509
1510        if (fd == AT_FDCWD || filename->name[0] == '/') {
1511                bprm->filename = filename->name;
1512        } else {
1513                if (filename->name[0] == '\0')
1514                        bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
1515                else
1516                        bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
1517                                                  fd, filename->name);
1518                if (!bprm->fdpath)
1519                        goto out_free;
1520
1521                bprm->filename = bprm->fdpath;
1522        }
1523        bprm->interp = bprm->filename;
1524
1525        retval = bprm_mm_init(bprm);
1526        if (retval)
1527                goto out_free;
1528        return bprm;
1529
1530out_free:
1531        free_bprm(bprm);
1532out:
1533        return ERR_PTR(retval);
1534}
1535
1536int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
1537{
1538        /* If a binfmt changed the interp, free it first. */
1539        if (bprm->interp != bprm->filename)
1540                kfree(bprm->interp);
1541        bprm->interp = kstrdup(interp, GFP_KERNEL);
1542        if (!bprm->interp)
1543                return -ENOMEM;
1544        return 0;
1545}
1546EXPORT_SYMBOL(bprm_change_interp);
1547
1548/*
1549 * determine how safe it is to execute the proposed program
1550 * - the caller must hold ->cred_guard_mutex to protect against
1551 *   PTRACE_ATTACH or seccomp thread-sync
1552 */
1553static void check_unsafe_exec(struct linux_binprm *bprm)
1554{
1555        struct task_struct *p = current, *t;
1556        unsigned n_fs;
1557
1558        if (p->ptrace)
1559                bprm->unsafe |= LSM_UNSAFE_PTRACE;
1560
1561        /*
1562         * This isn't strictly necessary, but it makes it harder for LSMs to
1563         * mess up.
1564         */
1565        if (task_no_new_privs(current))
1566                bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
1567
1568        t = p;
1569        n_fs = 1;
1570        spin_lock(&p->fs->lock);
1571        rcu_read_lock();
1572        while_each_thread(p, t) {
1573                if (t->fs == p->fs)
1574                        n_fs++;
1575        }
1576        rcu_read_unlock();
1577
1578        if (p->fs->users > n_fs)
1579                bprm->unsafe |= LSM_UNSAFE_SHARE;
1580        else
1581                p->fs->in_exec = 1;
1582        spin_unlock(&p->fs->lock);
1583}
1584
1585static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
1586{
1587        /* Handle suid and sgid on files */
1588        struct user_namespace *mnt_userns;
1589        struct inode *inode;
1590        unsigned int mode;
1591        kuid_t uid;
1592        kgid_t gid;
1593
1594        if (!mnt_may_suid(file->f_path.mnt))
1595                return;
1596
1597        if (task_no_new_privs(current))
1598                return;
1599
1600        inode = file->f_path.dentry->d_inode;
1601        mode = READ_ONCE(inode->i_mode);
1602        if (!(mode & (S_ISUID|S_ISGID)))
1603                return;
1604
1605        mnt_userns = file_mnt_user_ns(file);
1606
1607        /* Be careful if suid/sgid is set */
1608        inode_lock(inode);
1609
1610        /* reload atomically mode/uid/gid now that lock held */
1611        mode = inode->i_mode;
1612        uid = i_uid_into_mnt(mnt_userns, inode);
1613        gid = i_gid_into_mnt(mnt_userns, inode);
1614        inode_unlock(inode);
1615
1616        /* We ignore suid/sgid if there are no mappings for them in the ns */
1617        if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
1618                 !kgid_has_mapping(bprm->cred->user_ns, gid))
1619                return;
1620
1621        if (mode & S_ISUID) {
1622                bprm->per_clear |= PER_CLEAR_ON_SETID;
1623                bprm->cred->euid = uid;
1624        }
1625
1626        if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1627                bprm->per_clear |= PER_CLEAR_ON_SETID;
1628                bprm->cred->egid = gid;
1629        }
1630}
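/*
 * Editor's note: an illustrative userspace sketch, not part of fs/exec.c;
 * build it as a standalone program.  It demonstrates the
 * task_no_new_privs() bail-out above: once PR_SET_NO_NEW_PRIVS is set,
 * exec of a set-uid binary still succeeds, but bprm_fill_uid() leaves the
 * effective uid alone.  The binary path below is an assumption.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>

int main(void)
{
	/* Irrevocably forbid privilege gains across execve() in this task. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
		perror("prctl");
		return 1;
	}
	/*
	 * Exec any set-uid root binary here (the path is illustrative): it
	 * runs, but with euid equal to the caller's uid rather than 0.
	 */
	execl("/usr/bin/some-setuid-binary", "some-setuid-binary", (char *)NULL);
	perror("execl");
	return 1;
}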
1631
1632/*
1633 * Compute bprm->cred based upon the final binary.
1634 */
1635static int bprm_creds_from_file(struct linux_binprm *bprm)
1636{
1637        /* Compute creds based on which file? */
1638        struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;
1639
1640        bprm_fill_uid(bprm, file);
1641        return security_bprm_creds_from_file(bprm, file);
1642}
1643
1644/*
1645 * Fill the binprm structure from the inode.
1646 * Read the first BINPRM_BUF_SIZE bytes
1647 *
1648 * This may be called multiple times for binary chains (scripts for example).
1649 */
1650static int prepare_binprm(struct linux_binprm *bprm)
1651{
1652        loff_t pos = 0;
1653
1654        memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1655        return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
1656}
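/*
 * Editor's note: an illustrative sketch, not part of fs/exec.c.  A
 * load_binary() handler usually begins by checking the magic bytes that
 * prepare_binprm() read into bprm->buf -- roughly what binfmt_elf and
 * binfmt_script do.  The helper name below is hypothetical.
 */
static bool example_recognises_image(struct linux_binprm *bprm)
{
	/* ELF images start with the four bytes "\177ELF" (ELFMAG). */
	if (memcmp(bprm->buf, "\177ELF", 4) == 0)
		return true;
	/* Interpreter scripts start with "#!". */
	if (bprm->buf[0] == '#' && bprm->buf[1] == '!')
		return true;
	return false;
}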
1657
1658/*
1659 * Arguments are '\0' separated strings found at the location bprm->p
1660 * points to; chop off the first by relocating bprm->p to right after
1661 * the first '\0' encountered.
1662 */
1663int remove_arg_zero(struct linux_binprm *bprm)
1664{
1665        int ret = 0;
1666        unsigned long offset;
1667        char *kaddr;
1668        struct page *page;
1669
1670        if (!bprm->argc)
1671                return 0;
1672
1673        do {
1674                offset = bprm->p & ~PAGE_MASK;
1675                page = get_arg_page(bprm, bprm->p, 0);
1676                if (!page) {
1677                        ret = -EFAULT;
1678                        goto out;
1679                }
1680                kaddr = kmap_atomic(page);
1681
1682                for (; offset < PAGE_SIZE && kaddr[offset];
1683                                offset++, bprm->p++)
1684                        ;
1685
1686                kunmap_atomic(kaddr);
1687                put_arg_page(page);
1688        } while (offset == PAGE_SIZE);
1689
1690        bprm->p++;
1691        bprm->argc--;
1692        ret = 0;
1693
1694out:
1695        return ret;
1696}
1697EXPORT_SYMBOL(remove_arg_zero);
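/*
 * Editor's note: an illustrative sketch, not part of fs/exec.c.  This is
 * roughly how a "#!" handler rewrites the argument vector and switches
 * interpreters: drop the old argv[0] with remove_arg_zero(), push the
 * script path and the interpreter name (in reverse order, since bprm->p
 * grows the argument stack downwards), then record the new interpreter
 * with bprm_change_interp().  "i_name" is a hypothetical variable holding
 * the interpreter path parsed from bprm->buf.
 */
static int example_rewrite_argv(struct linux_binprm *bprm, const char *i_name)
{
	int retval;

	retval = remove_arg_zero(bprm);
	if (retval)
		return retval;
	/* Becomes argv[1]: the script itself, so the interpreter can open it. */
	retval = copy_string_kernel(bprm->interp, bprm);
	if (retval < 0)
		return retval;
	bprm->argc++;
	/* Becomes argv[0]: the interpreter's own name. */
	retval = copy_string_kernel(i_name, bprm);
	if (retval < 0)
		return retval;
	bprm->argc++;
	return bprm_change_interp(i_name, bprm);
}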
1698
1699#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1700/*
1701 * cycle through the list of binary format handlers until one recognizes the image
1702 */
1703static int search_binary_handler(struct linux_binprm *bprm)
1704{
1705        bool need_retry = IS_ENABLED(CONFIG_MODULES);
1706        struct linux_binfmt *fmt;
1707        int retval;
1708
1709        retval = prepare_binprm(bprm);
1710        if (retval < 0)
1711                return retval;
1712
1713        retval = security_bprm_check(bprm);
1714        if (retval)
1715                return retval;
1716
1717        retval = -ENOENT;
1718 retry:
1719        read_lock(&binfmt_lock);
1720        list_for_each_entry(fmt, &formats, lh) {
1721                if (!try_module_get(fmt->module))
1722                        continue;
1723                read_unlock(&binfmt_lock);
1724
1725                retval = fmt->load_binary(bprm);
1726
1727                read_lock(&binfmt_lock);
1728                put_binfmt(fmt);
1729                if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
1730                        read_unlock(&binfmt_lock);
1731                        return retval;
1732                }
1733        }
1734        read_unlock(&binfmt_lock);
1735
1736        if (need_retry) {
1737                if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
1738                    printable(bprm->buf[2]) && printable(bprm->buf[3]))
1739                        return retval;
1740                if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
1741                        return retval;
1742                need_retry = false;
1743                goto retry;
1744        }
1745
1746        return retval;
1747}
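/*
 * Editor's note: an illustrative sketch, not part of fs/exec.c.  A binary
 * format makes itself visible to search_binary_handler() by registering a
 * struct linux_binfmt, typically from module init -- the shape binfmt_elf
 * and friends use.  The names below are hypothetical.
 */
static int example_load_binary(struct linux_binprm *bprm)
{
	/* Inspect bprm->buf / bprm->file; return -ENOEXEC if not ours. */
	return -ENOEXEC;
}

static struct linux_binfmt example_format = {
	.module		= THIS_MODULE,
	.load_binary	= example_load_binary,
};

static int __init example_binfmt_init(void)
{
	/* Appends to the formats list; __register_binfmt(fmt, 1) would prepend. */
	register_binfmt(&example_format);
	return 0;
}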
1748
1749static int exec_binprm(struct linux_binprm *bprm)
1750{
1751        pid_t old_pid, old_vpid;
1752        int ret, depth;
1753
1754        /* Need to fetch pid before load_binary changes it */
1755        old_pid = current->pid;
1756        rcu_read_lock();
1757        old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
1758        rcu_read_unlock();
1759
1760        /* This allows 5 levels of binfmt rewrites before failing hard. */
1761        for (depth = 0;; depth++) {
1762                struct file *exec;
1763                if (depth > 5)
1764                        return -ELOOP;
1765
1766                ret = search_binary_handler(bprm);
1767                if (ret < 0)
1768                        return ret;
1769                if (!bprm->interpreter)
1770                        break;
1771
1772                exec = bprm->file;
1773                bprm->file = bprm->interpreter;
1774                bprm->interpreter = NULL;
1775
1776                allow_write_access(exec);
1777                if (unlikely(bprm->have_execfd)) {
1778                        if (bprm->executable) {
1779                                fput(exec);
1780                                return -ENOEXEC;
1781                        }
1782                        bprm->executable = exec;
1783                } else
1784                        fput(exec);
1785        }
1786
1787        audit_bprm(bprm);
1788        trace_sched_process_exec(current, old_pid, bprm);
1789        ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1790        proc_exec_connector(current);
1791        return 0;
1792}
1793
1794/*
1795 * Execute a new program; the common tail of execve(), execveat() and kernel_execve().
1796 */
1797static int bprm_execve(struct linux_binprm *bprm,
1798                       int fd, struct filename *filename, int flags)
1799{
1800        struct file *file;
1801        int retval;
1802
1803        retval = prepare_bprm_creds(bprm);
1804        if (retval)
1805                return retval;
1806
1807        check_unsafe_exec(bprm);
1808        current->in_execve = 1;
1809
1810        file = do_open_execat(fd, filename, flags);
1811        retval = PTR_ERR(file);
1812        if (IS_ERR(file))
1813                goto out_unmark;
1814
1815        sched_exec();
1816
1817        bprm->file = file;
1818        /*
1819         * Record that a name derived from an O_CLOEXEC fd will be
1820         * inaccessible after exec.  This allows the code in exec to
1821         * choose to fail when the executable is not mmapped into the
1822         * interpreter and an open file descriptor is not passed to
1823         * the interpreter.  This makes for a better user experience
1824         * than having the interpreter start and then immediately fail
1825         * when it finds the executable is inaccessible.
1826         */
1827        if (bprm->fdpath && get_close_on_exec(fd))
1828                bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
1829
1830        /* Set the unchanging part of bprm->cred */
1831        retval = security_bprm_creds_for_exec(bprm);
1832        if (retval)
1833                goto out;
1834
1835        retval = exec_binprm(bprm);
1836        if (retval < 0)
1837                goto out;
1838
1839        /* execve succeeded */
1840        current->fs->in_exec = 0;
1841        current->in_execve = 0;
1842        rseq_execve(current);
1843        acct_update_integrals(current);
1844        task_numa_free(current, false);
1845        return retval;
1846
1847out:
1848        /*
1849         * If past the point of no return ensure the code never
1850         * returns to the userspace process.  Use an existing fatal
1851         * signal if present otherwise terminate the process with
1852         * SIGSEGV.
1853         */
1854        if (bprm->point_of_no_return && !fatal_signal_pending(current))
1855                force_sigsegv(SIGSEGV);
1856
1857out_unmark:
1858        current->fs->in_exec = 0;
1859        current->in_execve = 0;
1860
1861        return retval;
1862}
1863
1864static int do_execveat_common(int fd, struct filename *filename,
1865                              struct user_arg_ptr argv,
1866                              struct user_arg_ptr envp,
1867                              int flags)
1868{
1869        struct linux_binprm *bprm;
1870        int retval;
1871
1872        if (IS_ERR(filename))
1873                return PTR_ERR(filename);
1874
1875        /*
1876         * We move the actual failure in case of RLIMIT_NPROC excess from
1877         * set*uid() to execve() because too many poorly written programs
1878         * don't check setuid() return code.  Here we additionally recheck
1879         * whether NPROC limit is still exceeded.
1880         */
1881        if ((current->flags & PF_NPROC_EXCEEDED) &&
1882            is_ucounts_overlimit(current_ucounts(), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
1883                retval = -EAGAIN;
1884                goto out_ret;
1885        }
1886
1887        /* We're below the limit (still or again), so we don't want to make
1888         * further execve() calls fail. */
1889        current->flags &= ~PF_NPROC_EXCEEDED;
1890
1891        bprm = alloc_bprm(fd, filename);
1892        if (IS_ERR(bprm)) {
1893                retval = PTR_ERR(bprm);
1894                goto out_ret;
1895        }
1896
1897        retval = count(argv, MAX_ARG_STRINGS);
1898        if (retval < 0)
1899                goto out_free;
1900        bprm->argc = retval;
1901
1902        retval = count(envp, MAX_ARG_STRINGS);
1903        if (retval < 0)
1904                goto out_free;
1905        bprm->envc = retval;
1906
1907        retval = bprm_stack_limits(bprm);
1908        if (retval < 0)
1909                goto out_free;
1910
1911        retval = copy_string_kernel(bprm->filename, bprm);
1912        if (retval < 0)
1913                goto out_free;
1914        bprm->exec = bprm->p;
1915
1916        retval = copy_strings(bprm->envc, envp, bprm);
1917        if (retval < 0)
1918                goto out_free;
1919
1920        retval = copy_strings(bprm->argc, argv, bprm);
1921        if (retval < 0)
1922                goto out_free;
1923
1924        retval = bprm_execve(bprm, fd, filename, flags);
1925out_free:
1926        free_bprm(bprm);
1927
1928out_ret:
1929        putname(filename);
1930        return retval;
1931}
1932
1933int kernel_execve(const char *kernel_filename,
1934                  const char *const *argv, const char *const *envp)
1935{
1936        struct filename *filename;
1937        struct linux_binprm *bprm;
1938        int fd = AT_FDCWD;
1939        int retval;
1940
1941        filename = getname_kernel(kernel_filename);
1942        if (IS_ERR(filename))
1943                return PTR_ERR(filename);
1944
1945        bprm = alloc_bprm(fd, filename);
1946        if (IS_ERR(bprm)) {
1947                retval = PTR_ERR(bprm);
1948                goto out_ret;
1949        }
1950
1951        retval = count_strings_kernel(argv);
1952        if (retval < 0)
1953                goto out_free;
1954        bprm->argc = retval;
1955
1956        retval = count_strings_kernel(envp);
1957        if (retval < 0)
1958                goto out_free;
1959        bprm->envc = retval;
1960
1961        retval = bprm_stack_limits(bprm);
1962        if (retval < 0)
1963                goto out_free;
1964
1965        retval = copy_string_kernel(bprm->filename, bprm);
1966        if (retval < 0)
1967                goto out_free;
1968        bprm->exec = bprm->p;
1969
1970        retval = copy_strings_kernel(bprm->envc, envp, bprm);
1971        if (retval < 0)
1972                goto out_free;
1973
1974        retval = copy_strings_kernel(bprm->argc, argv, bprm);
1975        if (retval < 0)
1976                goto out_free;
1977
1978        retval = bprm_execve(bprm, fd, filename, 0);
1979out_free:
1980        free_bprm(bprm);
1981out_ret:
1982        putname(filename);
1983        return retval;
1984}
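/*
 * Editor's note: an illustrative sketch, not part of fs/exec.c.  This is
 * the shape of an in-kernel caller of kernel_execve(), similar in spirit
 * to the way init/main.c execs /sbin/init; the argv/envp contents are
 * assumptions.
 */
static int example_run_init(const char *init_filename)
{
	const char *argv[] = { init_filename, NULL };
	const char *envp[] = { "HOME=/", "TERM=linux", NULL };

	return kernel_execve(init_filename, argv, envp);
}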
1985
1986static int do_execve(struct filename *filename,
1987        const char __user *const __user *__argv,
1988        const char __user *const __user *__envp)
1989{
1990        struct user_arg_ptr argv = { .ptr.native = __argv };
1991        struct user_arg_ptr envp = { .ptr.native = __envp };
1992        return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1993}
1994
1995static int do_execveat(int fd, struct filename *filename,
1996                const char __user *const __user *__argv,
1997                const char __user *const __user *__envp,
1998                int flags)
1999{
2000        struct user_arg_ptr argv = { .ptr.native = __argv };
2001        struct user_arg_ptr envp = { .ptr.native = __envp };
2002
2003        return do_execveat_common(fd, filename, argv, envp, flags);
2004}
2005
2006#ifdef CONFIG_COMPAT
2007static int compat_do_execve(struct filename *filename,
2008        const compat_uptr_t __user *__argv,
2009        const compat_uptr_t __user *__envp)
2010{
2011        struct user_arg_ptr argv = {
2012                .is_compat = true,
2013                .ptr.compat = __argv,
2014        };
2015        struct user_arg_ptr envp = {
2016                .is_compat = true,
2017                .ptr.compat = __envp,
2018        };
2019        return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
2020}
2021
2022static int compat_do_execveat(int fd, struct filename *filename,
2023                              const compat_uptr_t __user *__argv,
2024                              const compat_uptr_t __user *__envp,
2025                              int flags)
2026{
2027        struct user_arg_ptr argv = {
2028                .is_compat = true,
2029                .ptr.compat = __argv,
2030        };
2031        struct user_arg_ptr envp = {
2032                .is_compat = true,
2033                .ptr.compat = __envp,
2034        };
2035        return do_execveat_common(fd, filename, argv, envp, flags);
2036}
2037#endif
2038
2039void set_binfmt(struct linux_binfmt *new)
2040{
2041        struct mm_struct *mm = current->mm;
2042
2043        if (mm->binfmt)
2044                module_put(mm->binfmt->module);
2045
2046        mm->binfmt = new;
2047        if (new)
2048                __module_get(new->module);
2049}
2050EXPORT_SYMBOL(set_binfmt);
2051
2052/*
2053 * set_dumpable stores three-value SUID_DUMP_* into mm->flags.
2054 */
2055void set_dumpable(struct mm_struct *mm, int value)
2056{
2057        if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
2058                return;
2059
2060        set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
2061}
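/*
 * Editor's note: an illustrative userspace sketch, not part of fs/exec.c;
 * build it as a standalone program.  The same MMF_DUMPABLE state that
 * set_dumpable() manages is reachable from userspace through
 * prctl(PR_SET_DUMPABLE); the values 0 and 1 correspond to
 * SUID_DUMP_DISABLE and SUID_DUMP_USER.
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Mark this process non-dumpable: no core dumps, /proc/<pid> owned by root. */
	if (prctl(PR_SET_DUMPABLE, 0) != 0)
		perror("prctl(PR_SET_DUMPABLE)");
	printf("dumpable is now %d\n", prctl(PR_GET_DUMPABLE));
	return 0;
}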
2062
2063SYSCALL_DEFINE3(execve,
2064                const char __user *, filename,
2065                const char __user *const __user *, argv,
2066                const char __user *const __user *, envp)
2067{
2068        return do_execve(getname(filename), argv, envp);
2069}
2070
2071SYSCALL_DEFINE5(execveat,
2072                int, fd, const char __user *, filename,
2073                const char __user *const __user *, argv,
2074                const char __user *const __user *, envp,
2075                int, flags)
2076{
2077        return do_execveat(fd,
2078                           getname_uflags(filename, flags),
2079                           argv, envp, flags);
2080}
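/*
 * Editor's note: an illustrative userspace sketch, not part of fs/exec.c;
 * build it as a standalone program.  It exercises the execveat() path
 * above in its fexecve()-like form: an empty pathname plus AT_EMPTY_PATH
 * executes the program referred to by the descriptor itself, which is why
 * do_open_execat()/alloc_bprm() synthesize a /dev/fd/<fd> name for it.
 * Recent glibc also exposes an execveat() wrapper; the raw syscall is
 * used here.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	char *const argv[] = { "true", NULL };
	char *const envp[] = { NULL };
	int fd = open("/bin/true", O_RDONLY | O_CLOEXEC);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * Note: because the fd is close-on-exec, executing a "#!" script this
	 * way would be refused (BINPRM_FLAGS_PATH_INACCESSIBLE, see
	 * bprm_execve() above); a plain ELF binary is fine.
	 */
	syscall(SYS_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
	perror("execveat");
	return 1;
}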
2081
2082#ifdef CONFIG_COMPAT
2083COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
2084        const compat_uptr_t __user *, argv,
2085        const compat_uptr_t __user *, envp)
2086{
2087        return compat_do_execve(getname(filename), argv, envp);
2088}
2089
2090COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
2091                       const char __user *, filename,
2092                       const compat_uptr_t __user *, argv,
2093                       const compat_uptr_t __user *, envp,
2094                       int,  flags)
2095{
2096        return compat_do_execveat(fd,
2097                                  getname_uflags(filename, flags),
2098                                  argv, envp, flags);
2099}
2100#endif
2101