linux/kernel/umh.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * umh - the kernel usermode helper
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>
#include <linux/shmem_fs.h>
#include <linux/pipe_fs_i.h>

#include <trace/events/module.h>

#define CAP_BSET        (void *)1
#define CAP_PI          (void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);
static LIST_HEAD(umh_list);
static DEFINE_MUTEX(umh_list_lock);

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
        if (info->cleanup)
                (*info->cleanup)(info);
        kfree(info);
}

static void umh_complete(struct subprocess_info *sub_info)
{
        struct completion *comp = xchg(&sub_info->complete, NULL);
        /*
         * See call_usermodehelper_exec(). If xchg() returns NULL
         * we own sub_info, the UMH_KILLABLE caller has gone away
         * or the caller used UMH_NO_WAIT.
         */
        if (comp)
                complete(comp);
        else
                call_usermodehelper_freeinfo(sub_info);
}

/*
 * This is the task which runs the usermode application
 */
static int call_usermodehelper_exec_async(void *data)
{
        struct subprocess_info *sub_info = data;
        struct cred *new;
        int retval;

        spin_lock_irq(&current->sighand->siglock);
        flush_signal_handlers(current, 1);
        spin_unlock_irq(&current->sighand->siglock);

        /*
         * Our parent (unbound workqueue) runs with elevated scheduling
         * priority. Avoid propagating that into the userspace child.
         */
        set_user_nice(current, 0);

        retval = -ENOMEM;
        new = prepare_kernel_cred(current);
        if (!new)
                goto out;

        spin_lock(&umh_sysctl_lock);
        new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
        new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
                                             new->cap_inheritable);
        spin_unlock(&umh_sysctl_lock);

        if (sub_info->init) {
                retval = sub_info->init(sub_info, new);
                if (retval) {
                        abort_creds(new);
                        goto out;
                }
        }

        commit_creds(new);

        sub_info->pid = task_pid_nr(current);
        if (sub_info->file) {
                retval = do_execve_file(sub_info->file,
                                        sub_info->argv, sub_info->envp);
                if (!retval)
                        current->flags |= PF_UMH;
        } else
                retval = do_execve(getname_kernel(sub_info->path),
                                   (const char __user *const __user *)sub_info->argv,
                                   (const char __user *const __user *)sub_info->envp);
out:
        sub_info->retval = retval;
        /*
         * call_usermodehelper_exec_sync() will call umh_complete
         * if UMH_WAIT_PROC.
         */
        if (!(sub_info->wait & UMH_WAIT_PROC))
                umh_complete(sub_info);
        if (!retval)
                return 0;
        do_exit(0);
}

/* Handles UMH_WAIT_PROC.  */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
        pid_t pid;

        /* If SIGCLD is ignored kernel_wait4 won't populate the status. */
        kernel_sigaction(SIGCHLD, SIG_DFL);
        pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
        if (pid < 0) {
                sub_info->retval = pid;
        } else {
                int ret = -ECHILD;
                /*
                 * Normally it is bogus to call wait4() from in-kernel because
                 * wait4() wants to write the exit code to a userspace address.
                 * But call_usermodehelper_exec_sync() always runs as kernel
                 * thread (workqueue) and put_user() to a kernel address works
                 * OK for kernel threads, due to their having an mm_segment_t
                 * which spans the entire address space.
                 *
                 * Thus the __user pointer cast is valid here.
                 */
                kernel_wait4(pid, (int __user *)&ret, 0, NULL);

                /*
                 * If ret is 0, either call_usermodehelper_exec_async failed and
                 * the real error code is already in sub_info->retval or
                 * sub_info->retval is 0 anyway, so don't mess with it then.
                 */
                if (ret)
                        sub_info->retval = ret;
        }

        /* Restore default kernel sig handler */
        kernel_sigaction(SIGCHLD, SIG_IGN);

        umh_complete(sub_info);
}

/*
 * We need to create the usermodehelper kernel thread from a task that is
 * affine to an optimized set of CPUs (or the nohz housekeeping ones), so that
 * it inherits the widest possible affinity irrespective of
 * call_usermodehelper() callers with possibly reduced affinity (eg: per-cpu
 * workqueues). We don't want usermodehelper targets to contend for a busy CPU.
 *
 * Unbound workqueues provide such wide affinity and allow blocking on
 * UMH_WAIT_PROC requests without blocking pending requests (up to some limit).
 *
 * Besides, workqueues provide the privilege level that the caller might not
 * have in order to perform the usermodehelper request.
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
        struct subprocess_info *sub_info =
                container_of(work, struct subprocess_info, work);

        if (sub_info->wait & UMH_WAIT_PROC) {
                call_usermodehelper_exec_sync(sub_info);
        } else {
                pid_t pid;
                /*
                 * Use CLONE_PARENT to reparent it to kthreadd; we do not
                 * want to pollute current->children, and we need a parent
                 * that always ignores SIGCHLD to ensure auto-reaping.
                 */
                pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
                                    CLONE_PARENT | SIGCHLD);
                if (pid < 0) {
                        sub_info->retval = pid;
                        umh_complete(sub_info);
                }
        }
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT (5 * HZ)

int usermodehelper_read_trylock(void)
{
        DEFINE_WAIT(wait);
        int ret = 0;

        down_read(&umhelper_sem);
        for (;;) {
                prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
                                TASK_INTERRUPTIBLE);
                if (!usermodehelper_disabled)
                        break;

                if (usermodehelper_disabled == UMH_DISABLED)
                        ret = -EAGAIN;

                up_read(&umhelper_sem);

                if (ret)
                        break;

                schedule();
                try_to_freeze();

                down_read(&umhelper_sem);
        }
        finish_wait(&usermodehelper_disabled_waitq, &wait);
        return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
        DEFINE_WAIT(wait);

        if (timeout < 0)
                return -EINVAL;

        down_read(&umhelper_sem);
        for (;;) {
                prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (!usermodehelper_disabled)
                        break;

                up_read(&umhelper_sem);

                timeout = schedule_timeout(timeout);
                if (!timeout)
                        break;

                down_read(&umhelper_sem);
        }
        finish_wait(&usermodehelper_disabled_waitq, &wait);
        return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
        up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
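
/*
 * Illustrative sketch (not part of umh.c): how a caller might pair the
 * read-lock API above with a helper invocation so that it backs off with
 * -EAGAIN while helpers are disabled (e.g. across suspend).  The helper
 * path and arguments below are hypothetical.
 */
#if 0
static int example_guarded_helper(void)
{
        char *argv[] = { "/sbin/example-helper", NULL };
        static char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
        int ret;

        /* Refuse to spawn a helper while usermodehelper_disabled is set. */
        ret = usermodehelper_read_trylock();
        if (ret)
                return ret;

        ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);

        usermodehelper_read_unlock();
        return ret;
}
#endif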

/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wake up tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
        down_write(&umhelper_sem);
        usermodehelper_disabled = depth;
        wake_up(&usermodehelper_disabled_waitq);
        up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
        long retval;

        if (!depth)
                return -EINVAL;

        down_write(&umhelper_sem);
        usermodehelper_disabled = depth;
        up_write(&umhelper_sem);

        /*
         * From now on call_usermodehelper_exec() won't start any new
         * helpers, so it is sufficient if running_helpers turns out to
         * be zero at one point (it may be increased later, but that
         * doesn't matter).
         */
        retval = wait_event_timeout(running_helpers_waitq,
                                        atomic_read(&running_helpers) == 0,
                                        RUNNING_HELPERS_TIMEOUT);
        if (retval)
                return 0;

        __usermodehelper_set_disable_depth(UMH_ENABLED);
        return -EAGAIN;
}

static void helper_lock(void)
{
        atomic_inc(&running_helpers);
        smp_mb__after_atomic();
}

static void helper_unlock(void)
{
        if (atomic_dec_and_test(&running_helpers))
                wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 *
 * The init function is used to customize the helper process prior to
 * exec.  A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * about to be freed.  This can be used for freeing the argv and envp.
 * The function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv,
                char **envp, gfp_t gfp_mask,
                int (*init)(struct subprocess_info *info, struct cred *new),
                void (*cleanup)(struct subprocess_info *info),
                void *data)
{
        struct subprocess_info *sub_info;
        sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
        if (!sub_info)
                goto out;

        INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);

#ifdef CONFIG_STATIC_USERMODEHELPER
        sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH;
#else
        sub_info->path = path;
#endif
        sub_info->argv = argv;
        sub_info->envp = envp;

        sub_info->cleanup = cleanup;
        sub_info->init = init;
        sub_info->data = data;
  out:
        return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);
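
/*
 * Illustrative sketch (not part of umh.c): splitting setup and exec so a
 * cleanup callback can free a kmalloc'ed argv.  The command line is
 * hypothetical; the argv_split()/argv_free() pairing mirrors
 * call_usermodehelper_setup_file() and umh_clean_and_save_pid() below.
 */
#if 0
static void example_free_argv(struct subprocess_info *info)
{
        argv_free(info->argv);
}

static int example_setup_and_exec(void)
{
        static char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };
        struct subprocess_info *info;
        char **argv;

        argv = argv_split(GFP_KERNEL, "/sbin/example-helper --once", NULL);
        if (!argv)
                return -ENOMEM;

        info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL,
                                         NULL, example_free_argv, NULL);
        if (!info) {
                argv_free(argv);
                return -ENOMEM;
        }

        /* exec consumes info; the cleanup hook frees argv in all paths. */
        return call_usermodehelper_exec(info, UMH_WAIT_EXEC);
}
#endif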

struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
                int (*init)(struct subprocess_info *info, struct cred *new),
                void (*cleanup)(struct subprocess_info *info), void *data)
{
        struct subprocess_info *sub_info;
        struct umh_info *info = data;
        const char *cmdline = (info->cmdline) ? info->cmdline : "usermodehelper";

        sub_info = kzalloc(sizeof(struct subprocess_info), GFP_KERNEL);
        if (!sub_info)
                return NULL;

        sub_info->argv = argv_split(GFP_KERNEL, cmdline, NULL);
        if (!sub_info->argv) {
                kfree(sub_info);
                return NULL;
        }

        INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
        sub_info->path = "none";
        sub_info->file = file;
        sub_info->init = init;
        sub_info->cleanup = cleanup;
        sub_info->data = data;
        return sub_info;
}

static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
        struct umh_info *umh_info = info->data;
        struct file *from_umh[2];
        struct file *to_umh[2];
        int err;

        /* create pipe to send data to umh */
        err = create_pipe_files(to_umh, 0);
        if (err)
                return err;
        err = replace_fd(0, to_umh[0], 0);
        fput(to_umh[0]);
        if (err < 0) {
                fput(to_umh[1]);
                return err;
        }

        /* create pipe to receive data from umh */
        err = create_pipe_files(from_umh, 0);
        if (err) {
                fput(to_umh[1]);
                replace_fd(0, NULL, 0);
                return err;
        }
        err = replace_fd(1, from_umh[1], 0);
        fput(from_umh[1]);
        if (err < 0) {
                fput(to_umh[1]);
                replace_fd(0, NULL, 0);
                fput(from_umh[0]);
                return err;
        }

        umh_info->pipe_to_umh = to_umh[1];
        umh_info->pipe_from_umh = from_umh[0];
        return 0;
}

static void umh_clean_and_save_pid(struct subprocess_info *info)
{
        struct umh_info *umh_info = info->data;

        argv_free(info->argv);
        umh_info->pid = info->pid;
}

/**
 * fork_usermode_blob - fork a blob of bytes as a usermode process
 * @data: a blob of bytes that can be do_execv-ed as a file
 * @len: length of the blob
 * @info: information about usermode process (shouldn't be NULL)
 *
 * If info->cmdline is set it will be used as command line for the
 * user process, else "usermodehelper" is used.
 *
 * Returns either a negative error or zero, which indicates success in
 * executing the blob of bytes as a usermode process. In that case
 * 'struct umh_info *info' is populated with two pipes and the pid of
 * the process. The caller is responsible for health checking of the
 * user process, killing it via its pid, and closing the pipes when the
 * user process is no longer needed.
 */
int fork_usermode_blob(void *data, size_t len, struct umh_info *info)
{
        struct subprocess_info *sub_info;
        struct file *file;
        ssize_t written;
        loff_t pos = 0;
        int err;

        file = shmem_kernel_file_setup("", len, 0);
        if (IS_ERR(file))
                return PTR_ERR(file);

        written = kernel_write(file, data, len, &pos);
        if (written != len) {
                err = written;
                if (err >= 0)
                        err = -ENOMEM;
                goto out;
        }

        err = -ENOMEM;
        sub_info = call_usermodehelper_setup_file(file, umh_pipe_setup,
                                                  umh_clean_and_save_pid, info);
        if (!sub_info)
                goto out;

        err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
        if (!err) {
                mutex_lock(&umh_list_lock);
                list_add(&info->list, &umh_list);
                mutex_unlock(&umh_list_lock);
        }
out:
        fput(file);
        return err;
}
EXPORT_SYMBOL_GPL(fork_usermode_blob);
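
/*
 * Illustrative sketch (not part of umh.c): starting an embedded blob and
 * exchanging a message over the pipes that umh_pipe_setup() attached to the
 * helper's stdin/stdout.  The blob, its protocol and the "ping" message are
 * hypothetical; the pipe I/O uses the ordinary kernel_read()/kernel_write()
 * file APIs.
 */
#if 0
static struct umh_info example_umh_info;

static int example_start_blob(void *blob, size_t blob_len)
{
        loff_t pos = 0;
        char reply[16];
        ssize_t n;
        int err;

        err = fork_usermode_blob(blob, blob_len, &example_umh_info);
        if (err)
                return err;

        /* Request goes to the helper's stdin, reply comes from its stdout. */
        n = kernel_write(example_umh_info.pipe_to_umh, "ping\n", 5, &pos);
        if (n != 5)
                return n < 0 ? n : -EIO;

        pos = 0;
        n = kernel_read(example_umh_info.pipe_from_umh, reply, sizeof(reply), &pos);
        return n < 0 ? n : 0;
}
#endif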

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of system workqueues
 * (i.e. it runs with full root capabilities and optimized affinity).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
        DECLARE_COMPLETION_ONSTACK(done);
        int retval = 0;

        if (!sub_info->path) {
                call_usermodehelper_freeinfo(sub_info);
                return -EINVAL;
        }
        helper_lock();
        if (usermodehelper_disabled) {
                retval = -EBUSY;
                goto out;
        }

        /*
         * If there is no binary for us to call, then just return and get out of
         * here.  This allows us to set STATIC_USERMODEHELPER_PATH to "" and
         * disable all call_usermodehelper() calls.
         */
        if (strlen(sub_info->path) == 0)
                goto out;

        /*
         * Set the completion pointer only if there is a waiter.
         * This makes it possible to use umh_complete to free
         * the data structure in case of UMH_NO_WAIT.
         */
        sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
        sub_info->wait = wait;

        queue_work(system_unbound_wq, &sub_info->work);
        if (wait == UMH_NO_WAIT)        /* task has freed sub_info */
                goto unlock;

        if (wait & UMH_KILLABLE) {
                retval = wait_for_completion_killable(&done);
                if (!retval)
                        goto wait_done;

                /* umh_complete() will see NULL and free sub_info */
                if (xchg(&sub_info->complete, NULL))
                        goto unlock;
                /* fallthrough, umh_complete() was already called */
        }

        wait_for_completion(&done);
wait_done:
        retval = sub_info->retval;
out:
        call_usermodehelper_freeinfo(sub_info);
unlock:
        helper_unlock();
        return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);
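
/*
 * Illustrative sketch (not part of umh.c): the wait modes as seen by a caller.
 * The path is hypothetical.  With UMH_WAIT_PROC the value returned here is the
 * wait4()-style exit status collected by call_usermodehelper_exec_sync(); with
 * UMH_KILLABLE a fatal signal to the caller hands sub_info ownership back to
 * umh_complete() via the xchg() above.
 */
#if 0
static int example_wait_for_helper(void)
{
        char *argv[] = { "/sbin/example-helper", NULL };
        struct subprocess_info *info;

        info = call_usermodehelper_setup(argv[0], argv, NULL, GFP_KERNEL,
                                         NULL, NULL, NULL);
        if (!info)
                return -ENOMEM;

        /* Block until the helper exits, but remain killable while waiting. */
        return call_usermodehelper_exec(info, UMH_WAIT_PROC | UMH_KILLABLE);
}
#endif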

/**
 * call_usermodehelper() - prepare and start a usermode application
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * This function is the equivalent of using call_usermodehelper_setup()
 * followed by call_usermodehelper_exec().
 */
int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
{
        struct subprocess_info *info;
        gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

        info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
                                         NULL, NULL, NULL);
        if (info == NULL)
                return -ENOMEM;

        return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper);
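
/*
 * Illustrative sketch (not part of umh.c): the one-shot wrapper in its
 * UMH_NO_WAIT form, which the kernel-doc above notes is safe from interrupt
 * context (GFP_ATOMIC allocation, no sleeping wait, sub_info freed by the
 * worker).  The notification helper and its argument are hypothetical.
 */
#if 0
static void example_notify_userspace(char *event)
{
        char *argv[] = { "/sbin/example-notify", event, NULL };
        static char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

        /* No status can be reported back in this mode; ignore the result. */
        call_usermodehelper(argv[0], argv, envp, UMH_NO_WAIT);
}
#endif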

static int proc_cap_handler(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
{
        struct ctl_table t;
        unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
        kernel_cap_t new_cap;
        int err, i;

        if (write && (!capable(CAP_SETPCAP) ||
                      !capable(CAP_SYS_MODULE)))
                return -EPERM;

        /*
         * convert from the global kernel_cap_t to the ulong array to print to
         * userspace if this is a read.
         */
        spin_lock(&umh_sysctl_lock);
        for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
                if (table->data == CAP_BSET)
                        cap_array[i] = usermodehelper_bset.cap[i];
                else if (table->data == CAP_PI)
                        cap_array[i] = usermodehelper_inheritable.cap[i];
                else
                        BUG();
        }
        spin_unlock(&umh_sysctl_lock);

        t = *table;
        t.data = &cap_array;

        /*
         * actually read or write an array of ulongs from userspace.  Remember
         * these are least significant 32 bits first
         */
        err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
        if (err < 0)
                return err;

        /*
         * convert from the sysctl array of ulongs to the kernel_cap_t
         * internal representation
         */
        for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
                new_cap.cap[i] = cap_array[i];

        /*
         * Drop everything not in the new_cap (but don't add things)
         */
        if (write) {
                spin_lock(&umh_sysctl_lock);
                if (table->data == CAP_BSET)
                        usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
                if (table->data == CAP_PI)
                        usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
                spin_unlock(&umh_sysctl_lock);
        }

        return 0;
}

void __exit_umh(struct task_struct *tsk)
{
        struct umh_info *info;
        pid_t pid = tsk->pid;

        mutex_lock(&umh_list_lock);
        list_for_each_entry(info, &umh_list, list) {
                if (info->pid == pid) {
                        list_del(&info->list);
                        mutex_unlock(&umh_list_lock);
                        goto out;
                }
        }
        mutex_unlock(&umh_list_lock);
        return;
out:
        if (info->cleanup)
                info->cleanup(info);
}

struct ctl_table usermodehelper_table[] = {
        {
                .procname       = "bset",
                .data           = CAP_BSET,
                .maxlen         = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
                .mode           = 0600,
                .proc_handler   = proc_cap_handler,
        },
        {
                .procname       = "inheritable",
                .data           = CAP_PI,
                .maxlen         = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
                .mode           = 0600,
                .proc_handler   = proc_cap_handler,
        },
        { }
};