linux/kernel/kexec.c
/*
 * kexec.c - kexec_load system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/security.h>

#include "kexec_internal.h"

/*
 * copy_user_segment_list - read the segment descriptors from userspace
 * @image:       kimage whose segment table (image->segment) is filled in
 * @nr_segments: number of entries in @segments; both callers cap this at
 *               KEXEC_SEGMENT_MAX before calling, so the byte-count
 *               multiplication below cannot overflow
 * @segments:    userspace array of struct kexec_segment
 *
 * Returns 0 on success, -EFAULT if the user buffer cannot be read.
 */
static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		/* copy_from_user() returns bytes NOT copied; fold to errno */
		ret = -EFAULT;

	return ret;
}
  38
/*
 * kimage_alloc_init - allocate and initialize a kimage from user segments
 * @rimage:      out parameter; on success receives the new kimage
 * @entry:       physical entry point of the new kernel
 * @nr_segments: number of entries in @segments
 * @segments:    userspace segment descriptors to load
 * @flags:       KEXEC_* flags; only KEXEC_ON_CRASH is examined here
 *
 * Builds the controlling structure for a kexec load: copies and sanity
 * checks the segment list, and allocates the control code page (plus a
 * swap page for the non-crash case).  On success *rimage is set and 0 is
 * returned; on failure a negative errno is returned and everything
 * allocated here is freed again via the goto-cleanup chain below.
 */
static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/*
		 * Verify we have a valid entry point: a crash kernel must
		 * start inside the reserved crashkernel region.
		 */
		if ((entry < crashk_res.start) || (entry > crashk_res.end))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	 /* Enable the special crash kernel control page allocation policy. */
	if (kexec_on_panic) {
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	/* The swap page is only needed for the non-crash (normal) path. */
	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	/* Undo kimage_alloc_control_pages() before dropping the image. */
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}
 104
/*
 * do_kexec_load - core of the kexec_load syscall
 * @entry:       entry point of the image to load
 * @nr_segments: number of user segments; 0 means "uninstall current image"
 * @segments:    userspace segment array
 * @flags:       KEXEC_* flags (KEXEC_ON_CRASH, KEXEC_PRESERVE_CONTEXT, ...)
 *
 * Caller must hold kexec_mutex (taken in the syscall wrapper below).
 * Loads the segments into freshly allocated pages, then atomically swaps
 * the new image into either kexec_image or kexec_crash_image.  Returns 0
 * on success or a negative errno.
 */
static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
		struct kexec_segment __user *segments, unsigned long flags)
{
	struct kimage **dest_image, *image;
	unsigned long i;
	int ret;

	if (flags & KEXEC_ON_CRASH) {
		dest_image = &kexec_crash_image;
		/*
		 * The crashkernel region may be write-protected by the
		 * arch while an image is loaded; lift that before we
		 * touch it.  Re-protected at the "out" label below.
		 */
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	} else {
		dest_image = &kexec_image;
	}

	if (nr_segments == 0) {
		/* Uninstall image: detach atomically, then free it. */
		kimage_free(xchg(dest_image, NULL));
		return 0;
	}
	if (flags & KEXEC_ON_CRASH) {
		/*
		 * Loading another kernel to switch to if this one
		 * crashes.  Free any current crash dump kernel before
		 * we corrupt it.
		 */
		kimage_free(xchg(&kexec_crash_image, NULL));
	}

	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
	if (ret)
		return ret;

	if (flags & KEXEC_PRESERVE_CONTEXT)
		image->preserve_context = 1;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	for (i = 0; i < nr_segments; i++) {
		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/*
	 * Install the new kernel and uninstall the old.  After the xchg,
	 * "image" holds the OLD image (possibly NULL), which is freed at
	 * "out" below; on the error paths it still holds the new,
	 * partially-loaded image, which is freed instead.
	 */
	image = xchg(dest_image, image);

out:
	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	kimage_free(image);
	return ret;
}
 163
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
 184
/*
 * kexec_load syscall entry point: permission and flag validation, then
 * delegate to do_kexec_load() under kexec_mutex.
 */
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/*
	 * NOTE(review): get_securelevel() is not a mainline interface —
	 * presumably a distro/securelevel patch that forbids kexec once
	 * the securelevel has been raised; confirm against the tree this
	 * file belongs to.
	 */
	if (get_securelevel() > 0)
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash  kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of a in use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	result = do_kexec_load(entry, nr_segments, segments, flags);

	mutex_unlock(&kexec_mutex);

	return result;
}
 232
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat entry point: translate the 32-bit segment descriptors
 * (compat_kexec_segment, with 32-bit pointers/sizes) into native
 * kexec_segment entries in a compat_alloc_user_space() staging area,
 * then forward to the native syscall.
 */
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	/* Bound nr_segments before sizing the staging buffer below. */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		/* Widen each field; compat_ptr() converts the 32-bit
		 * user pointer to a native __user pointer.
		 */
		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif
 271