linux/kernel/kmod.c
/*
	kmod, the new module loader (replaces kerneld)
	Kirk Petersen

	Reorganized not to be a daemon by Adam Richter, with guidance
	from Greg Zornetzer.

	Modified to avoid chroot and file sharing problems.
	Mikael Pettersson

	Limit the concurrent number of kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

	call_usermodehelper wait flag, and remove exec_usermodehelper.
	Rusty Russell <rusty@rustcorp.com.au>  Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>

extern int max_threads;

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES

/*
	modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

static void free_modprobe_argv(struct subprocess_info *info)
{
	kfree(info->argv[3]); /* check call_modprobe() */
	kfree(info->argv);
}

static int call_modprobe(char *module_name, int wait)
{
	struct subprocess_info *info;
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};

	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
	if (!argv)
		goto out;

	module_name = kstrdup(module_name, GFP_KERNEL);
	if (!module_name)
		goto free_argv;

	argv[0] = modprobe_path;
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = module_name;	/* check free_modprobe_argv() */
	argv[4] = NULL;

	info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
					 NULL, free_modprobe_argv, NULL);
	if (!info)
		goto free_module_name;

	return call_usermodehelper_exec(info, wait | UMH_KILLABLE);

free_module_name:
	kfree(module_name);
free_argv:
	kfree(argv);
out:
	return -ENOMEM;
}

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code or positive exit code from
 * "modprobe" on failure. Note that a successful module load does not mean
 * the module did not then unload and exit on an error of its own. Callers
 * must check that the service they requested is now available, not blindly
 * invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	unsigned int max_modprobes;
	int ret;
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	/*
	 * We don't allow synchronous module loading from async.  Module
	 * init may invoke async_synchronize_full() which will end up
	 * waiting for this task which already is waiting for the module
	 * loading to complete, leading to a deadlock.
	 */
	WARN_ON_ONCE(wait && current_is_async());

	if (!modprobe_path[0])
		return 0;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	/* If modprobe needs a service that is in a module, we get a recursive
	 * loop.  Limit the number of running kmod threads to max_threads/2 or
	 * MAX_KMOD_CONCURRENT, whichever is smaller.  A cleaner method
	 * would be to run the parents of this process, counting how many times
	 * kmod was invoked.  That would mean accessing the internals of the
	 * process tables to get the command line, proc_pid_cmdline is static
	 * and it is not worth changing the proc code just to handle this case.
	 * KAO.
	 *
	 * "trace the ppid" is simple, but will fail if someone's
	 * parent exits.  I think this is as good as it gets. --RR
	 */
	max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > max_modprobes) {
		/* We may be blaming an innocent here, but unlikely */
		if (kmod_loop_msg < 5) {
			printk(KERN_ERR
			       "request_module: runaway loop modprobe %s\n",
			       module_name);
			kmod_loop_msg++;
		}
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

	atomic_dec(&kmod_concurrent);
	return ret;
}
EXPORT_SYMBOL(__request_module);
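
/*
 * Illustrative usage (a sketch, not part of this file): the module
 * name "foo-proto" and the lookup helper foo_proto_find() are
 * hypothetical.  As the kernel-doc above warns, a zero return only
 * means modprobe itself succeeded; the caller must still check that
 * the service it asked for is actually available:
 *
 *	if (request_module("foo-proto") != 0)
 *		return -ENODEV;
 *	if (!foo_proto_find())
 *		return -ENODEV;
 */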
#endif /* CONFIG_MODULES */

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away
	 * or the caller used UMH_NO_WAIT.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/*
 * This is the task which runs the usermode application
 */
static int call_usermodehelper_exec_async(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Our parent (unbound workqueue) runs with elevated scheduling
	 * priority. Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto out;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto out;
		}
	}

	commit_creds(new);

	retval = do_execve(getname_kernel(sub_info->path),
			   (const char __user *const __user *)sub_info->argv,
			   (const char __user *const __user *)sub_info->envp);
out:
	sub_info->retval = retval;
	/*
	 * call_usermodehelper_exec_sync() will call umh_complete
	 * if UMH_WAIT_PROC.
	 */
	if (!(sub_info->wait & UMH_WAIT_PROC))
		umh_complete(sub_info);
	if (!retval)
		return 0;
	do_exit(0);
}

/* Handles UMH_WAIT_PROC.  */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
	pid_t pid;

	/* If SIGCHLD is ignored, sys_wait4 won't populate the status. */
	kernel_sigaction(SIGCHLD, SIG_DFL);
	pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But call_usermodehelper_exec_sync() always runs as a kernel
		 * thread (workqueue) and put_user() to a kernel address works
		 * OK for kernel threads, due to their having an mm_segment_t
		 * which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either call_usermodehelper_exec_async failed and
		 * the real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	/* Restore default kernel sig handler */
	kernel_sigaction(SIGCHLD, SIG_IGN);

	umh_complete(sub_info);
}

/*
 * We need to create the usermodehelper kernel thread from a task that is
 * affine to an optimized set of CPUs (or nohz housekeeping ones) so that it
 * inherits the widest affinity, irrespective of call_usermodehelper() callers
 * with possibly reduced affinity (eg: per-cpu workqueues). We don't want
 * usermodehelper targets to contend for a busy CPU.
 *
 * Unbound workqueues provide such wide affinity and allow blocking on
 * UMH_WAIT_PROC requests without blocking pending requests (up to some limit).
 *
 * Besides, workqueues provide the privilege level that the caller might not
 * have to perform the usermodehelper request.
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);

	if (sub_info->wait & UMH_WAIT_PROC) {
		call_usermodehelper_exec_sync(sub_info);
	} else {
		pid_t pid;
		/*
		 * Use CLONE_PARENT to reparent it to kthreadd; we do not
		 * want to pollute current->children, and we need a parent
		 * that always ignores SIGCHLD to ensure auto-reaping.
		 */
		pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
				    CLONE_PARENT | SIGCHLD);
		if (pid < 0) {
			sub_info->retval = pid;
			umh_complete(sub_info);
		}
	}
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing userland processes from being created after the
 * userland has been frozen during a system-wide hibernation or suspend
 * operation).  Should always be manipulated under umhelper_sem acquired for
 * write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
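
/*
 * Sketch of the intended read-side pattern (the caller and its
 * do_helper_request() are hypothetical): helpers must not be started
 * while usermodehelper_disabled is set, so callers bracket their
 * request with the read lock and bail out when helpers are disabled:
 *
 *	ret = usermodehelper_read_trylock();
 *	if (ret)
 *		return ret;	\/\* -EAGAIN: helpers are disabled \*\/
 *	ret = do_helper_request();
 *	usermodehelper_read_unlock();
 *	return ret;
 */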

/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wake up tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
					atomic_read(&running_helpers) == 0,
					RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}
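
/*
 * Sketch of the intended pairing (the caller on the suspend/hibernation
 * path is hypothetical; the exact depths used there may differ):
 *
 *	error = __usermodehelper_disable(UMH_DISABLED);
 *	if (error)
 *		return error;	\/\* helpers still running after the timeout \*\/
 *	...
 *	__usermodehelper_set_disable_depth(UMH_ENABLED);	\/\* re-enable \*\/
 */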

static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 *
 * The init function is used to customize the helper process prior to
 * exec.  A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * freed.  This can be used for freeing the argv and envp.  The
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
		char **envp, gfp_t gfp_mask,
		int (*init)(struct subprocess_info *info, struct cred *new),
		void (*cleanup)(struct subprocess_info *info),
		void *data)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
	sub_info->path = path;
	sub_info->argv = argv;
	sub_info->envp = envp;

	sub_info->cleanup = cleanup;
	sub_info->init = init;
	sub_info->data = data;
  out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);
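
/*
 * A minimal sketch of pairing a heap-allocated argv with @cleanup,
 * mirroring the call_modprobe() pattern near the top of this file
 * (free_my_argv() is hypothetical):
 *
 *	static void free_my_argv(struct subprocess_info *info)
 *	{
 *		kfree(info->argv);
 *	}
 *
 *	info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
 *					 NULL, free_my_argv, NULL);
 *	if (!info)
 *		kfree(argv);	\/\* @cleanup runs only once info exists \*\/
 */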

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of system workqueues
 * (i.e. it runs with full root capabilities and optimized affinity).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	if (!sub_info->path) {
		call_usermodehelper_freeinfo(sub_info);
		return -EINVAL;
	}
	helper_lock();
	if (usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}
	/*
	 * Set the completion pointer only if there is a waiter.
	 * This makes it possible to use umh_complete to free
	 * the data structure in case of UMH_NO_WAIT.
	 */
	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
	sub_info->wait = wait;

	queue_work(system_unbound_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);
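
/*
 * Sketch (hypothetical caller): a synchronous, killable invocation.
 * Whichever path is taken, @info has been freed by the time this
 * returns, either here or by the helper, so it must not be reused:
 *
 *	ret = call_usermodehelper_exec(info, UMH_WAIT_PROC | UMH_KILLABLE);
 *	if (ret)
 *		pr_warn("helper failed: %d\n", ret);
 */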

/**
 * call_usermodehelper() - prepare and start a usermode application
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * This function is equivalent to calling call_usermodehelper_setup() and
 * then call_usermodehelper_exec().
 */
int call_usermodehelper(char *path, char **argv, char **envp, int wait)
{
	struct subprocess_info *info;
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

	info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
					 NULL, NULL, NULL);
	if (info == NULL)
		return -ENOMEM;

	return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper);
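
/*
 * Sketch (the helper path and arguments are hypothetical), following
 * the same argv/envp conventions as call_modprobe() above:
 *
 *	static char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };
 *	char *argv[] = { "/sbin/myhelper", "--event", "foo", NULL };
 *
 *	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
 */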

static int proc_cap_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;
	/*
	 * actually read or write an array of ulongs from userspace.  Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	spin_lock(&umh_sysctl_lock);
	if (write) {
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
	}
	spin_unlock(&umh_sysctl_lock);

	return 0;
}

struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};