linux/kernel/kexec_core.c
   1/*
   2 * kexec.c - kexec system call core code.
   3 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
   4 *
   5 * This source code is licensed under the GNU General Public License,
   6 * Version 2.  See the file COPYING for more details.
   7 */
   8
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/capability.h>
  12#include <linux/mm.h>
  13#include <linux/file.h>
  14#include <linux/slab.h>
  15#include <linux/fs.h>
  16#include <linux/kexec.h>
  17#include <linux/mutex.h>
  18#include <linux/list.h>
  19#include <linux/highmem.h>
  20#include <linux/syscalls.h>
  21#include <linux/reboot.h>
  22#include <linux/ioport.h>
  23#include <linux/hardirq.h>
  24#include <linux/elf.h>
  25#include <linux/elfcore.h>
  26#include <linux/utsname.h>
  27#include <linux/numa.h>
  28#include <linux/suspend.h>
  29#include <linux/device.h>
  30#include <linux/freezer.h>
  31#include <linux/pm.h>
  32#include <linux/cpu.h>
  33#include <linux/uaccess.h>
  34#include <linux/io.h>
  35#include <linux/console.h>
  36#include <linux/vmalloc.h>
  37#include <linux/swap.h>
  38#include <linux/syscore_ops.h>
  39#include <linux/compiler.h>
  40#include <linux/hugetlb.h>
  41#include <linux/frame.h>
  42
  43#include <asm/page.h>
  44#include <asm/sections.h>
  45
  46#include <crypto/hash.h>
  47#include <crypto/sha.h>
  48#include "kexec_internal.h"
  49
  50DEFINE_MUTEX(kexec_mutex);
  51
  52/* Per cpu memory for storing cpu states in case of system crash. */
  53note_buf_t __percpu *crash_notes;
  54
  55/* Flag to indicate we are going to kexec a new kernel */
  56bool kexec_in_progress = false;
  57
  58
  59/* Location of the reserved area for the crash kernel */
  60struct resource crashk_res = {
  61        .name  = "Crash kernel",
  62        .start = 0,
  63        .end   = 0,
  64        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  65        .desc  = IORES_DESC_CRASH_KERNEL
  66};
  67struct resource crashk_low_res = {
  68        .name  = "Crash kernel",
  69        .start = 0,
  70        .end   = 0,
  71        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  72        .desc  = IORES_DESC_CRASH_KERNEL
  73};
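    /*
     * Note: crashk_low_res describes the optional low-memory part of the
     * crash kernel reservation (e.g. a crashkernel=...,low range used on
     * some architectures so the kdump kernel has DMA-capable memory); how
     * it is populated is architecture specific.
     */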
  74
  75int kexec_should_crash(struct task_struct *p)
  76{
  77        /*
  78         * If crash_kexec_post_notifiers is enabled, don't run
  79         * crash_kexec() here yet, which must be run after panic
  80         * notifiers in panic().
  81         */
  82        if (crash_kexec_post_notifiers)
  83                return 0;
  84        /*
  85         * There are 4 panic() calls in do_exit() path, each of which
  86         * corresponds to each of these 4 conditions.
  87         */
  88        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
  89                return 1;
  90        return 0;
  91}
  92
  93int kexec_crash_loaded(void)
  94{
  95        return !!kexec_crash_image;
  96}
  97EXPORT_SYMBOL_GPL(kexec_crash_loaded);
  98
  99/*
 100 * When kexec transitions to the new kernel there is a one-to-one
 101 * mapping between physical and virtual addresses.  On processors
 102 * where you can disable the MMU this is trivial and easy.  For
 103 * others it is still a simple predictable page table to set up.
 104 *
 105 * In that environment kexec copies the new kernel to its final
 106 * resting place.  This means I can only support memory whose
 107 * physical address can fit in an unsigned long.  In particular
 108 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 109 * If the assembly stub has more restrictive requirements
 110 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 111 * defined more restrictively in <asm/kexec.h>.
 112 *
 113 * The code for the transition from the current kernel to
 114 * the new kernel is placed in the control_code_buffer, whose size
 115 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 116 * page of memory is necessary, but some architectures require more.
 117 * Because this memory must be identity mapped in the transition from
 118 * virtual to physical addresses it must live in the range
 119 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 120 * modifiable.
 121 *
 122 * The assembly stub in the control code buffer is passed a linked list
 123 * of descriptor pages detailing the source pages of the new kernel,
 124 * and the destination addresses of those source pages.  As this data
 125 * structure is not used in the context of the current OS, it must
 126 * be self-contained.
 127 *
 128 * The code has been made to work with highmem pages and will use a
 129 * destination page in its final resting place (if it happens
 130 * to allocate it).  The end product of this is that most of the
 131 * physical address space, and most of RAM can be used.
 132 *
 133 * Future directions include:
 134 *  - allocating a page table with the control code buffer identity
 135 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 136 *    reliable.
 137 */
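    /*
     * A rough sketch of the descriptor list format described above (see
     * kimage_add_entry() and for_each_kimage_entry() below): it is a flat
     * run of kimage_entry_t values whose low bits carry IND_* flags,
     * for example:
     *
     *   dest | IND_DESTINATION   set the current destination address
     *   page | IND_SOURCE        copy this page to dest, dest += PAGE_SIZE
     *   page | IND_SOURCE        ...
     *   next | IND_INDIRECTION   continue with the entries in another page
     *   IND_DONE                 end of the list
     */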
 138
 139/*
 140 * KIMAGE_NO_DEST is an impossible destination address, used when
 141 * allocating pages whose destination address we do not care about.
 142 */
 143#define KIMAGE_NO_DEST (-1UL)
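    /* Round a byte count up to the number of pages needed to hold it. */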
 144#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
 145
 146static struct page *kimage_alloc_page(struct kimage *image,
 147                                       gfp_t gfp_mask,
 148                                       unsigned long dest);
 149
 150int sanity_check_segment_list(struct kimage *image)
 151{
 152        int i;
 153        unsigned long nr_segments = image->nr_segments;
 154        unsigned long total_pages = 0;
 155
 156        /*
 157         * Verify we have good destination addresses.  The caller is
 158         * responsible for making certain we don't attempt to load
 159         * the new image into invalid or reserved areas of RAM.  This
 160         * just verifies it is an address we can use.
 161         *
 162         * Since the kernel does everything in page size chunks ensure
 163         * the destination addresses are page aligned.  Too many
 164         * special cases crop up when we don't do this.  The most
 165         * insidious is getting overlapping destination addresses
 166         * simply because addresses are changed to page size
 167         * granularity.
 168         */
 169        for (i = 0; i < nr_segments; i++) {
 170                unsigned long mstart, mend;
 171
 172                mstart = image->segment[i].mem;
 173                mend   = mstart + image->segment[i].memsz;
 174                if (mstart > mend)
 175                        return -EADDRNOTAVAIL;
 176                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
 177                        return -EADDRNOTAVAIL;
 178                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
 179                        return -EADDRNOTAVAIL;
 180        }
 181
 182        /* Verify our destination addresses do not overlap.
 183         * If we allowed overlapping destination addresses
 184         * through, very weird things can happen with no
 185         * easy explanation as one segment stomps on another.
 186         */
 187        for (i = 0; i < nr_segments; i++) {
 188                unsigned long mstart, mend;
 189                unsigned long j;
 190
 191                mstart = image->segment[i].mem;
 192                mend   = mstart + image->segment[i].memsz;
 193                for (j = 0; j < i; j++) {
 194                        unsigned long pstart, pend;
 195
 196                        pstart = image->segment[j].mem;
 197                        pend   = pstart + image->segment[j].memsz;
 198                        /* Do the segments overlap ? */
 199                        /* Do the segments overlap? */
 200                                return -EINVAL;
 201                }
 202        }
 203
 204        /* Ensure our buffer sizes do not exceed
 205         * our memory sizes.  This should always be the case,
 206         * and it is easier to check up front than to be surprised
 207         * later on.
 208         */
 209        for (i = 0; i < nr_segments; i++) {
 210                if (image->segment[i].bufsz > image->segment[i].memsz)
 211                        return -EINVAL;
 212        }
 213
 214        /*
 215         * Verify that no more than half of memory will be consumed. If the
 216         * request from userspace is too large, a large amount of time will be
 217         * wasted allocating pages, which can cause a soft lockup.
 218         */
 219        for (i = 0; i < nr_segments; i++) {
 220                if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
 221                        return -EINVAL;
 222
 223                total_pages += PAGE_COUNT(image->segment[i].memsz);
 224        }
 225
 226        if (total_pages > totalram_pages / 2)
 227                return -EINVAL;
 228
 229        /*
 230         * Verify we have good destination addresses.  Normally
 231         * the caller is responsible for making certain we don't
 232         * attempt to load the new image into invalid or reserved
 233         * areas of RAM.  But crash kernels are preloaded into a
 234         * reserved area of ram.  We must ensure the addresses
 235         * are in the reserved area otherwise preloading the
 236         * kernel could corrupt things.
 237         */
 238
 239        if (image->type == KEXEC_TYPE_CRASH) {
 240                for (i = 0; i < nr_segments; i++) {
 241                        unsigned long mstart, mend;
 242
 243                        mstart = image->segment[i].mem;
 244                        mend = mstart + image->segment[i].memsz - 1;
 245                        /* Ensure we are within the crash kernel limits */
 246                        if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
 247                            (mend > phys_to_boot_phys(crashk_res.end)))
 248                                return -EADDRNOTAVAIL;
 249                }
 250        }
 251
 252        return 0;
 253}
 254
 255struct kimage *do_kimage_alloc_init(void)
 256{
 257        struct kimage *image;
 258
 259        /* Allocate a controlling structure */
 260        image = kzalloc(sizeof(*image), GFP_KERNEL);
 261        if (!image)
 262                return NULL;
 263
 264        image->head = 0;
 265        image->entry = &image->head;
 266        image->last_entry = &image->head;
 267        image->control_page = ~0; /* By default this does not apply */
 268        image->type = KEXEC_TYPE_DEFAULT;
 269
 270        /* Initialize the list of control pages */
 271        INIT_LIST_HEAD(&image->control_pages);
 272
 273        /* Initialize the list of destination pages */
 274        INIT_LIST_HEAD(&image->dest_pages);
 275
 276        /* Initialize the list of unusable pages */
 277        INIT_LIST_HEAD(&image->unusable_pages);
 278
 279        return image;
 280}
 281
 282int kimage_is_destination_range(struct kimage *image,
 283                                        unsigned long start,
 284                                        unsigned long end)
 285{
 286        unsigned long i;
 287
 288        for (i = 0; i < image->nr_segments; i++) {
 289                unsigned long mstart, mend;
 290
 291                mstart = image->segment[i].mem;
 292                mend = mstart + image->segment[i].memsz;
 293                if ((end > mstart) && (start < mend))
 294                        return 1;
 295        }
 296
 297        return 0;
 298}
 299
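    /*
     * Allocate 2^order pages for kexec's own use.  The pages are marked
     * PageReserved and the allocation order is stashed in page_private()
     * so that kimage_free_pages() can later release the whole block.
     */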
 300static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 301{
 302        struct page *pages;
 303
 304        pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
 305        if (pages) {
 306                unsigned int count, i;
 307
 308                pages->mapping = NULL;
 309                set_page_private(pages, order);
 310                count = 1 << order;
 311                for (i = 0; i < count; i++)
 312                        SetPageReserved(pages + i);
 313
 314                arch_kexec_post_alloc_pages(page_address(pages), count,
 315                                            gfp_mask);
 316
 317                if (gfp_mask & __GFP_ZERO)
 318                        for (i = 0; i < count; i++)
 319                                clear_highpage(pages + i);
 320        }
 321
 322        return pages;
 323}
 324
 325static void kimage_free_pages(struct page *page)
 326{
 327        unsigned int order, count, i;
 328
 329        order = page_private(page);
 330        count = 1 << order;
 331
 332        arch_kexec_pre_free_pages(page_address(page), count);
 333
 334        for (i = 0; i < count; i++)
 335                ClearPageReserved(page + i);
 336        __free_pages(page, order);
 337}
 338
 339void kimage_free_page_list(struct list_head *list)
 340{
 341        struct page *page, *next;
 342
 343        list_for_each_entry_safe(page, next, list, lru) {
 344                list_del(&page->lru);
 345                kimage_free_pages(page);
 346        }
 347}
 348
 349static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 350                                                        unsigned int order)
 351{
 352        /* Control pages are special, they are the intermediaries
 353         * that are needed while we copy the rest of the pages
 354         * to their final resting place.  As such they must
 355         * not conflict with either the destination addresses
 356         * or memory the kernel is already using.
 357         *
 358         * The only case where we really need more than one of
 359         * these is for architectures where we cannot disable
 360         * the MMU and must instead generate an identity mapped
 361         * page table for all of the memory.
 362         *
 363         * At worst this runs in O(N) of the image size.
 364         */
 365        struct list_head extra_pages;
 366        struct page *pages;
 367        unsigned int count;
 368
 369        count = 1 << order;
 370        INIT_LIST_HEAD(&extra_pages);
 371
 372        /* Loop while I can allocate a page and the page allocated
 373         * is a destination page.
 374         */
 375        do {
 376                unsigned long pfn, epfn, addr, eaddr;
 377
 378                pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
 379                if (!pages)
 380                        break;
 381                pfn   = page_to_boot_pfn(pages);
 382                epfn  = pfn + count;
 383                addr  = pfn << PAGE_SHIFT;
 384                eaddr = epfn << PAGE_SHIFT;
 385                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
 386                              kimage_is_destination_range(image, addr, eaddr)) {
 387                        list_add(&pages->lru, &extra_pages);
 388                        pages = NULL;
 389                }
 390        } while (!pages);
 391
 392        if (pages) {
 393                /* Remember the allocated page... */
 394                list_add(&pages->lru, &image->control_pages);
 395
 396                /* Because the page is already in its destination
 397                 * location we will never allocate another page at
 398                 * that address.  Therefore kimage_alloc_pages
 399                 * will not return it (again) and we don't need
 400                 * to give it an entry in image->segment[].
 401                 */
 402        }
 403        /* Deal with the destination pages I have inadvertently allocated.
 404         *
 405         * Ideally I would convert multi-page allocations into single
 406         * page allocations, and add everything to image->dest_pages.
 407         *
 408         * For now it is simpler to just free the pages.
 409         */
 410        kimage_free_page_list(&extra_pages);
 411
 412        return pages;
 413}
 414
 415static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
 416                                                      unsigned int order)
 417{
 418        /* Control pages are special, they are the intermediaries
 419         * that are needed while we copy the rest of the pages
 420         * to their final resting place.  As such they must
 421         * not conflict with either the destination addresses
 422         * or memory the kernel is already using.
 423         *
 424         * Control pages are also the only pages we must allocate
 425         * when loading a crash kernel.  All of the other pages
 426         * are specified by the segments and we just memcpy
 427         * into them directly.
 428         *
 429         * The only case where we really need more than one of
 430         * these is for architectures where we cannot disable
 431         * the MMU and must instead generate an identity mapped
 432         * page table for all of the memory.
 433         *
 434         * Given the low demand this implements a very simple
 435         * allocator that finds the first hole of the appropriate
 436         * size in the reserved memory region, and allocates all
 437         * of the memory up to and including the hole.
 438         */
 439        unsigned long hole_start, hole_end, size;
 440        struct page *pages;
 441
 442        pages = NULL;
 443        size = (1 << order) << PAGE_SHIFT;
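            /* Round the search start up to the allocation size (a power of two). */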
 444        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
 445        hole_end   = hole_start + size - 1;
 446        while (hole_end <= crashk_res.end) {
 447                unsigned long i;
 448
 449                cond_resched();
 450
 451                if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
 452                        break;
 453                /* See if I overlap any of the segments */
 454                for (i = 0; i < image->nr_segments; i++) {
 455                        unsigned long mstart, mend;
 456
 457                        mstart = image->segment[i].mem;
 458                        mend   = mstart + image->segment[i].memsz - 1;
 459                        if ((hole_end >= mstart) && (hole_start <= mend)) {
 460                                /* Advance the hole to the end of the segment */
 461                                hole_start = (mend + (size - 1)) & ~(size - 1);
 462                                hole_end   = hole_start + size - 1;
 463                                break;
 464                        }
 465                }
 466                /* If I don't overlap any segments I have found my hole! */
 467                if (i == image->nr_segments) {
 468                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
 469                        image->control_page = hole_end;
 470                        break;
 471                }
 472        }
 473
 474        return pages;
 475}
 476
 477
 478struct page *kimage_alloc_control_pages(struct kimage *image,
 479                                         unsigned int order)
 480{
 481        struct page *pages = NULL;
 482
 483        switch (image->type) {
 484        case KEXEC_TYPE_DEFAULT:
 485                pages = kimage_alloc_normal_control_pages(image, order);
 486                break;
 487        case KEXEC_TYPE_CRASH:
 488                pages = kimage_alloc_crash_control_pages(image, order);
 489                break;
 490        }
 491
 492        return pages;
 493}
 494
 495int kimage_crash_copy_vmcoreinfo(struct kimage *image)
 496{
 497        struct page *vmcoreinfo_page;
 498        void *safecopy;
 499
 500        if (image->type != KEXEC_TYPE_CRASH)
 501                return 0;
 502
 503        /*
 504         * For kdump, allocate one vmcoreinfo safe copy from the
 505         * crash memory.  Since arch_kexec_protect_crashkres() runs
 506         * after the kexec syscall, the copy is naturally protected from
 507         * write (and even read) access under the kernel direct mapping.
 508         * But we still need to write to it when a crash happens, to
 509         * generate the vmcoreinfo note, so we rely on vmap for that
 510         * purpose.
 511         */
 512        vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
 513        if (!vmcoreinfo_page) {
 514                pr_warn("Could not allocate vmcoreinfo buffer\n");
 515                return -ENOMEM;
 516        }
 517        safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
 518        if (!safecopy) {
 519                pr_warn("Could not vmap vmcoreinfo buffer\n");
 520                return -ENOMEM;
 521        }
 522
 523        image->vmcoreinfo_data_copy = safecopy;
 524        crash_update_vmcoreinfo_safecopy(safecopy);
 525
 526        return 0;
 527}
 528
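    /*
     * Append one entry to the kimage entry list.  The last slot of each
     * indirection page is reserved for an IND_INDIRECTION link to the next
     * page, and a trailing zero entry is always kept so kimage_terminate()
     * can later turn it into IND_DONE.
     */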
 529static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 530{
 531        if (*image->entry != 0)
 532                image->entry++;
 533
 534        if (image->entry == image->last_entry) {
 535                kimage_entry_t *ind_page;
 536                struct page *page;
 537
 538                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
 539                if (!page)
 540                        return -ENOMEM;
 541
 542                ind_page = page_address(page);
 543                *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
 544                image->entry = ind_page;
 545                image->last_entry = ind_page +
 546                                      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
 547        }
 548        *image->entry = entry;
 549        image->entry++;
 550        *image->entry = 0;
 551
 552        return 0;
 553}
 554
 555static int kimage_set_destination(struct kimage *image,
 556                                   unsigned long destination)
 557{
 558        int result;
 559
 560        destination &= PAGE_MASK;
 561        result = kimage_add_entry(image, destination | IND_DESTINATION);
 562
 563        return result;
 564}
 565
 566
 567static int kimage_add_page(struct kimage *image, unsigned long page)
 568{
 569        int result;
 570
 571        page &= PAGE_MASK;
 572        result = kimage_add_entry(image, page | IND_SOURCE);
 573
 574        return result;
 575}
 576
 577
 578static void kimage_free_extra_pages(struct kimage *image)
 579{
 580        /* Walk through and free any extra destination pages I may have */
 581        kimage_free_page_list(&image->dest_pages);
 582
 583        /* Walk through and free any unusable pages I have cached */
 584        kimage_free_page_list(&image->unusable_pages);
 585
 586}
 587void kimage_terminate(struct kimage *image)
 588{
 589        if (*image->entry != 0)
 590                image->entry++;
 591
 592        *image->entry = IND_DONE;
 593}
 594
 595#define for_each_kimage_entry(image, ptr, entry) \
 596        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
 597                ptr = (entry & IND_INDIRECTION) ? \
 598                        boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
 599
 600static void kimage_free_entry(kimage_entry_t entry)
 601{
 602        struct page *page;
 603
 604        page = boot_pfn_to_page(entry >> PAGE_SHIFT);
 605        kimage_free_pages(page);
 606}
 607
 608void kimage_free(struct kimage *image)
 609{
 610        kimage_entry_t *ptr, entry;
 611        kimage_entry_t ind = 0;
 612
 613        if (!image)
 614                return;
 615
 616        if (image->vmcoreinfo_data_copy) {
 617                crash_update_vmcoreinfo_safecopy(NULL);
 618                vunmap(image->vmcoreinfo_data_copy);
 619        }
 620
 621        kimage_free_extra_pages(image);
 622        for_each_kimage_entry(image, ptr, entry) {
 623                if (entry & IND_INDIRECTION) {
 624                        /* Free the previous indirection page */
 625                        if (ind & IND_INDIRECTION)
 626                                kimage_free_entry(ind);
 627                        /* Save this indirection page until we are
 628                         * done with it.
 629                         */
 630                        ind = entry;
 631                } else if (entry & IND_SOURCE)
 632                        kimage_free_entry(entry);
 633        }
 634        /* Free the final indirection page */
 635        if (ind & IND_INDIRECTION)
 636                kimage_free_entry(ind);
 637
 638        /* Handle any machine specific cleanup */
 639        machine_kexec_cleanup(image);
 640
 641        /* Free the kexec control pages... */
 642        kimage_free_page_list(&image->control_pages);
 643
 644        /*
 645         * Free up any temporary buffers allocated.  This path might be
 646         * hit if an error occurred long after buffer allocation.
 647         */
 648        if (image->file_mode)
 649                kimage_file_post_load_cleanup(image);
 650
 651        kfree(image);
 652}
 653
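    /*
     * Find the IND_SOURCE entry whose destination is @page.  The running
     * destination starts at the last IND_DESTINATION entry seen and
     * advances one page for every IND_SOURCE entry.
     */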
 654static kimage_entry_t *kimage_dst_used(struct kimage *image,
 655                                        unsigned long page)
 656{
 657        kimage_entry_t *ptr, entry;
 658        unsigned long destination = 0;
 659
 660        for_each_kimage_entry(image, ptr, entry) {
 661                if (entry & IND_DESTINATION)
 662                        destination = entry & PAGE_MASK;
 663                else if (entry & IND_SOURCE) {
 664                        if (page == destination)
 665                                return ptr;
 666                        destination += PAGE_SIZE;
 667                }
 668        }
 669
 670        return NULL;
 671}
 672
 673static struct page *kimage_alloc_page(struct kimage *image,
 674                                        gfp_t gfp_mask,
 675                                        unsigned long destination)
 676{
 677        /*
 678         * Here we implement safeguards to ensure that a source page
 679         * is not copied to its destination page before the data on
 680         * the destination page is no longer useful.
 681         *
 682         * To do this we maintain the invariant that a source page is
 683         * either its own destination page, or it is not a
 684         * destination page at all.
 685         *
 686         * That is slightly stronger than required, but the proof
 687         * that no problems will occur is trivial, and the
 688         * implementation is simple to verify.
 689         *
 690         * When allocating all pages normally this algorithm will run
 691         * in O(N) time, but in the worst case it will run in O(N^2)
 692         * time.  If the runtime is a problem, the data structures can
 693         * be fixed.
 694         */
 695        struct page *page;
 696        unsigned long addr;
 697
 698        /*
 699         * Walk through the list of destination pages, and see if I
 700         * have a match.
 701         */
 702        list_for_each_entry(page, &image->dest_pages, lru) {
 703                addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 704                if (addr == destination) {
 705                        list_del(&page->lru);
 706                        return page;
 707                }
 708        }
 709        page = NULL;
 710        while (1) {
 711                kimage_entry_t *old;
 712
 713                /* Allocate a page, if we run out of memory give up */
 714                page = kimage_alloc_pages(gfp_mask, 0);
 715                if (!page)
 716                        return NULL;
 717                /* If the page cannot be used file it away */
 718                if (page_to_boot_pfn(page) >
 719                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
 720                        list_add(&page->lru, &image->unusable_pages);
 721                        continue;
 722                }
 723                addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 724
 725                /* If it is the destination page we want, use it */
 726                if (addr == destination)
 727                        break;
 728
 729                /* If the page is not a destination page, use it */
 730                if (!kimage_is_destination_range(image, addr,
 731                                                  addr + PAGE_SIZE))
 732                        break;
 733
 734                /*
 735                 * I know that the page is someone's destination page.
 736                 * See if there is already a source page for this
 737                 * destination page.  And if so swap the source pages.
 738                 */
 739                old = kimage_dst_used(image, addr);
 740                if (old) {
 741                        /* If so move it */
 742                        unsigned long old_addr;
 743                        struct page *old_page;
 744
 745                        old_addr = *old & PAGE_MASK;
 746                        old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
 747                        copy_highpage(page, old_page);
 748                        *old = addr | (*old & ~PAGE_MASK);
 749
 750                        /* The old page I have found cannot be a
 751                         * destination page, so return it if its
 752                         * gfp_flags honor the ones passed in.
 753                         */
 754                        if (!(gfp_mask & __GFP_HIGHMEM) &&
 755                            PageHighMem(old_page)) {
 756                                kimage_free_pages(old_page);
 757                                continue;
 758                        }
 759                        addr = old_addr;
 760                        page = old_page;
 761                        break;
 762                }
 763                /* Place the page on the destination list, to be used later */
 764                list_add(&page->lru, &image->dest_pages);
 765        }
 766
 767        return page;
 768}
 769
 770static int kimage_load_normal_segment(struct kimage *image,
 771                                         struct kexec_segment *segment)
 772{
 773        unsigned long maddr;
 774        size_t ubytes, mbytes;
 775        int result;
 776        unsigned char __user *buf = NULL;
 777        unsigned char *kbuf = NULL;
 778
 779        result = 0;
 780        if (image->file_mode)
 781                kbuf = segment->kbuf;
 782        else
 783                buf = segment->buf;
 784        ubytes = segment->bufsz;
 785        mbytes = segment->memsz;
 786        maddr = segment->mem;
 787
 788        result = kimage_set_destination(image, maddr);
 789        if (result < 0)
 790                goto out;
 791
 792        while (mbytes) {
 793                struct page *page;
 794                char *ptr;
 795                size_t uchunk, mchunk;
 796
 797                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
 798                if (!page) {
 799                        result  = -ENOMEM;
 800                        goto out;
 801                }
 802                result = kimage_add_page(image, page_to_boot_pfn(page)
 803                                                                << PAGE_SHIFT);
 804                if (result < 0)
 805                        goto out;
 806
 807                ptr = kmap(page);
 808                /* Start with a clear page */
 809                clear_page(ptr);
 810                ptr += maddr & ~PAGE_MASK;
 811                mchunk = min_t(size_t, mbytes,
 812                                PAGE_SIZE - (maddr & ~PAGE_MASK));
 813                uchunk = min(ubytes, mchunk);
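                    /*
                     * ubytes (bufsz) can run out before mbytes (memsz); since
                     * the page was cleared above, any memsz tail beyond bufsz
                     * is left zero-filled.
                     */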
 814
 815                /* For file based kexec, source pages are in kernel memory */
 816                if (image->file_mode)
 817                        memcpy(ptr, kbuf, uchunk);
 818                else
 819                        result = copy_from_user(ptr, buf, uchunk);
 820                kunmap(page);
 821                if (result) {
 822                        result = -EFAULT;
 823                        goto out;
 824                }
 825                ubytes -= uchunk;
 826                maddr  += mchunk;
 827                if (image->file_mode)
 828                        kbuf += mchunk;
 829                else
 830                        buf += mchunk;
 831                mbytes -= mchunk;
 832        }
 833out:
 834        return result;
 835}
 836
 837static int kimage_load_crash_segment(struct kimage *image,
 838                                        struct kexec_segment *segment)
 839{
 840        /* For crash dump kernels we simply copy the data from
 841         * user space to its destination.
 842         * We do things a page at a time for the sake of kmap.
 843         */
 844        unsigned long maddr;
 845        size_t ubytes, mbytes;
 846        int result;
 847        unsigned char __user *buf = NULL;
 848        unsigned char *kbuf = NULL;
 849
 850        result = 0;
 851        if (image->file_mode)
 852                kbuf = segment->kbuf;
 853        else
 854                buf = segment->buf;
 855        ubytes = segment->bufsz;
 856        mbytes = segment->memsz;
 857        maddr = segment->mem;
 858        while (mbytes) {
 859                struct page *page;
 860                char *ptr;
 861                size_t uchunk, mchunk;
 862
 863                page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
 864                if (!page) {
 865                        result  = -ENOMEM;
 866                        goto out;
 867                }
 868                ptr = kmap(page);
 869                ptr += maddr & ~PAGE_MASK;
 870                mchunk = min_t(size_t, mbytes,
 871                                PAGE_SIZE - (maddr & ~PAGE_MASK));
 872                uchunk = min(ubytes, mchunk);
 873                if (mchunk > uchunk) {
 874                        /* Zero the trailing part of the page */
 875                        memset(ptr + uchunk, 0, mchunk - uchunk);
 876                }
 877
 878                /* For file based kexec, source pages are in kernel memory */
 879                if (image->file_mode)
 880                        memcpy(ptr, kbuf, uchunk);
 881                else
 882                        result = copy_from_user(ptr, buf, uchunk);
 883                kexec_flush_icache_page(page);
 884                kunmap(page);
 885                if (result) {
 886                        result = -EFAULT;
 887                        goto out;
 888                }
 889                ubytes -= uchunk;
 890                maddr  += mchunk;
 891                if (image->file_mode)
 892                        kbuf += mchunk;
 893                else
 894                        buf += mchunk;
 895                mbytes -= mchunk;
 896        }
 897out:
 898        return result;
 899}
 900
 901int kimage_load_segment(struct kimage *image,
 902                                struct kexec_segment *segment)
 903{
 904        int result = -ENOMEM;
 905
 906        switch (image->type) {
 907        case KEXEC_TYPE_DEFAULT:
 908                result = kimage_load_normal_segment(image, segment);
 909                break;
 910        case KEXEC_TYPE_CRASH:
 911                result = kimage_load_crash_segment(image, segment);
 912                break;
 913        }
 914
 915        return result;
 916}
 917
 918struct kimage *kexec_image;
 919struct kimage *kexec_crash_image;
 920int kexec_load_disabled;
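    /*
     * kexec_load_disabled is a one-way switch: once set (via the
     * kernel.kexec_load_disabled sysctl) further image loads are refused
     * until reboot.
     */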
 921
 922/*
 923 * No panic_cpu check version of crash_kexec().  This function is called
 924 * only when panic_cpu holds the current CPU number; this is the only CPU
 925 * which processes crash_kexec routines.
 926 */
 927void __noclone __crash_kexec(struct pt_regs *regs)
 928{
 929        /* Take the kexec_mutex here to prevent sys_kexec_load
 930         * running on one cpu from replacing the crash kernel
 931         * we are using after a panic on a different cpu.
 932         *
 933         * If the crash kernel was not located in a fixed area
 934         * of memory the xchg(&kexec_crash_image) would be
 935         * sufficient.  But since I reuse the memory...
 936         */
 937        if (mutex_trylock(&kexec_mutex)) {
 938                if (kexec_crash_image) {
 939                        struct pt_regs fixed_regs;
 940
 941                        crash_setup_regs(&fixed_regs, regs);
 942                        crash_save_vmcoreinfo();
 943                        machine_crash_shutdown(&fixed_regs);
 944                        machine_kexec(kexec_crash_image);
 945                }
 946                mutex_unlock(&kexec_mutex);
 947        }
 948}
 949STACK_FRAME_NON_STANDARD(__crash_kexec);
 950
 951void crash_kexec(struct pt_regs *regs)
 952{
 953        int old_cpu, this_cpu;
 954
 955        /*
 956         * Only one CPU is allowed to execute the crash_kexec() code as with
 957         * panic().  Otherwise parallel calls of panic() and crash_kexec()
 958         * may stop each other.  To exclude them, we use panic_cpu here too.
 959         */
 960        this_cpu = raw_smp_processor_id();
 961        old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
 962        if (old_cpu == PANIC_CPU_INVALID) {
 963                /* This is the 1st CPU which comes here, so go ahead. */
 964                printk_safe_flush_on_panic();
 965                __crash_kexec(regs);
 966
 967                /*
 968                 * Reset panic_cpu to allow another panic()/crash_kexec()
 969                 * call.
 970                 */
 971                atomic_set(&panic_cpu, PANIC_CPU_INVALID);
 972        }
 973}
 974
 975size_t crash_get_memory_size(void)
 976{
 977        size_t size = 0;
 978
 979        mutex_lock(&kexec_mutex);
 980        if (crashk_res.end != crashk_res.start)
 981                size = resource_size(&crashk_res);
 982        mutex_unlock(&kexec_mutex);
 983        return size;
 984}
 985
 986void __weak crash_free_reserved_phys_range(unsigned long begin,
 987                                           unsigned long end)
 988{
 989        unsigned long addr;
 990
 991        for (addr = begin; addr < end; addr += PAGE_SIZE)
 992                free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
 993}
 994
 995int crash_shrink_memory(unsigned long new_size)
 996{
 997        int ret = 0;
 998        unsigned long start, end;
 999        unsigned long old_size;
1000        struct resource *ram_res;
1001
1002        mutex_lock(&kexec_mutex);
1003
1004        if (kexec_crash_image) {
1005                ret = -ENOENT;
1006                goto unlock;
1007        }
1008        start = crashk_res.start;
1009        end = crashk_res.end;
1010        old_size = (end == 0) ? 0 : end - start + 1;
1011        if (new_size >= old_size) {
1012                ret = (new_size == old_size) ? 0 : -EINVAL;
1013                goto unlock;
1014        }
1015
1016        ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1017        if (!ram_res) {
1018                ret = -ENOMEM;
1019                goto unlock;
1020        }
1021
1022        start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1023        end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1024
1025        crash_free_reserved_phys_range(end, crashk_res.end);
1026
1027        if ((start == end) && (crashk_res.parent != NULL))
1028                release_resource(&crashk_res);
1029
1030        ram_res->start = end;
1031        ram_res->end = crashk_res.end;
1032        ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
1033        ram_res->name = "System RAM";
1034
1035        crashk_res.end = end - 1;
1036
1037        insert_resource(&iomem_resource, ram_res);
1038
1039unlock:
1040        mutex_unlock(&kexec_mutex);
1041        return ret;
1042}
1043
1044void crash_save_cpu(struct pt_regs *regs, int cpu)
1045{
1046        struct elf_prstatus prstatus;
1047        u32 *buf;
1048
1049        if ((cpu < 0) || (cpu >= nr_cpu_ids))
1050                return;
1051
1052        /* Using ELF notes here is opportunistic.
1053         * I need a well-defined structure format
1054         * for the data I pass, and I need tags
1055         * on the data to indicate what information I have
1056         * squirrelled away.  ELF notes happen to provide
1057         * all of that, so there is no need to invent something new.
1058         */
1059        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
1060        if (!buf)
1061                return;
1062        memset(&prstatus, 0, sizeof(prstatus));
1063        prstatus.pr_pid = current->pid;
1064        elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1065        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1066                              &prstatus, sizeof(prstatus));
1067        final_note(buf);
1068}
1069
1070static int __init crash_notes_memory_init(void)
1071{
1072        /* Allocate memory for saving cpu registers. */
1073        size_t size, align;
1074
1075        /*
1076         * crash_notes could be allocated across 2 vmalloc pages when percpu
1077         * is vmalloc based.  vmalloc doesn't guarantee 2 contiguous vmalloc
1078         * pages are also on 2 contiguous physical pages.  In this case the
1079         * 2nd part of crash_notes in 2nd page could be lost since only the
1080         * starting address and size of crash_notes are exported through sysfs.
1081         * Here round up the size of crash_notes to the nearest power of two
1082         * and pass it to __alloc_percpu as align value. This can make sure
1083         * crash_notes is allocated inside one physical page.
1084         */
1085        size = sizeof(note_buf_t);
1086        align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
1087
1088        /*
1089         * Break compile if size is bigger than PAGE_SIZE since crash_notes
1090         * definitely will be in 2 pages with that.
1091         */
1092        BUILD_BUG_ON(size > PAGE_SIZE);
1093
1094        crash_notes = __alloc_percpu(size, align);
1095        if (!crash_notes) {
1096                pr_warn("Memory allocation for saving cpu register states failed\n");
1097                return -ENOMEM;
1098        }
1099        return 0;
1100}
1101subsys_initcall(crash_notes_memory_init);
1102
1103
1104/*
1105 * Move into place and start executing a preloaded standalone
1106 * executable.  If nothing was preloaded return an error.
1107 */
1108int kernel_kexec(void)
1109{
1110        int error = 0;
1111
1112        if (!mutex_trylock(&kexec_mutex))
1113                return -EBUSY;
1114        if (!kexec_image) {
1115                error = -EINVAL;
1116                goto Unlock;
1117        }
1118
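    /*
     * With KEXEC_PRESERVE_CONTEXT (kexec jump) the current kernel must be
     * able to resume after the new image returns, so the transition is
     * staged like a suspend: devices are quiesced through the dpm_*
     * callbacks and syscore state is saved before machine_kexec(), then
     * restored on the way back.
     */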
1119#ifdef CONFIG_KEXEC_JUMP
1120        if (kexec_image->preserve_context) {
1121                lock_system_sleep();
1122                pm_prepare_console();
1123                error = freeze_processes();
1124                if (error) {
1125                        error = -EBUSY;
1126                        goto Restore_console;
1127                }
1128                suspend_console();
1129                error = dpm_suspend_start(PMSG_FREEZE);
1130                if (error)
1131                        goto Resume_console;
1132                /* At this point, dpm_suspend_start() has been called,
1133                 * but *not* dpm_suspend_end(). We *must* call
1134                 * dpm_suspend_end() now.  Otherwise, drivers for
1135                 * some devices (e.g. interrupt controllers) become
1136                 * desynchronized with the actual state of the
1137                 * hardware at resume time, and evil weirdness ensues.
1138                 */
1139                error = dpm_suspend_end(PMSG_FREEZE);
1140                if (error)
1141                        goto Resume_devices;
1142                error = disable_nonboot_cpus();
1143                if (error)
1144                        goto Enable_cpus;
1145                local_irq_disable();
1146                error = syscore_suspend();
1147                if (error)
1148                        goto Enable_irqs;
1149        } else
1150#endif
1151        {
1152                kexec_in_progress = true;
1153                kernel_restart_prepare(NULL);
1154                migrate_to_reboot_cpu();
1155
1156                /*
1157                 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1158                 * no further code needs to use CPU hotplug (which is true in
1159                 * the reboot case). However, the kexec path depends on using
1160                 * CPU hotplug again; so re-enable it here.
1161                 */
1162                cpu_hotplug_enable();
1163                pr_emerg("Starting new kernel\n");
1164                machine_shutdown();
1165        }
1166
1167        machine_kexec(kexec_image);
1168
1169#ifdef CONFIG_KEXEC_JUMP
1170        if (kexec_image->preserve_context) {
1171                syscore_resume();
1172 Enable_irqs:
1173                local_irq_enable();
1174 Enable_cpus:
1175                enable_nonboot_cpus();
1176                dpm_resume_start(PMSG_RESTORE);
1177 Resume_devices:
1178                dpm_resume_end(PMSG_RESTORE);
1179 Resume_console:
1180                resume_console();
1181                thaw_processes();
1182 Restore_console:
1183                pm_restore_console();
1184                unlock_system_sleep();
1185        }
1186#endif
1187
1188 Unlock:
1189        mutex_unlock(&kexec_mutex);
1190        return error;
1191}
1192
1193/*
1194 * Protection mechanism for crashkernel reserved memory after
1195 * the kdump kernel is loaded.
1196 *
1197 * Provide an empty default implementation here -- architecture
1198 * code may override this
1199 */
1200void __weak arch_kexec_protect_crashkres(void)
1201{}
1202
1203void __weak arch_kexec_unprotect_crashkres(void)
1204{}
1205