linux/kernel/kexec_core.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * kexec_core.c - kexec system call core code.
   4 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/capability.h>
  10#include <linux/mm.h>
  11#include <linux/file.h>
  12#include <linux/slab.h>
  13#include <linux/fs.h>
  14#include <linux/kexec.h>
  15#include <linux/mutex.h>
  16#include <linux/list.h>
  17#include <linux/highmem.h>
  18#include <linux/syscalls.h>
  19#include <linux/reboot.h>
  20#include <linux/ioport.h>
  21#include <linux/hardirq.h>
  22#include <linux/elf.h>
  23#include <linux/elfcore.h>
  24#include <linux/utsname.h>
  25#include <linux/numa.h>
  26#include <linux/suspend.h>
  27#include <linux/device.h>
  28#include <linux/freezer.h>
  29#include <linux/pm.h>
  30#include <linux/cpu.h>
  31#include <linux/uaccess.h>
  32#include <linux/io.h>
  33#include <linux/console.h>
  34#include <linux/vmalloc.h>
  35#include <linux/swap.h>
  36#include <linux/syscore_ops.h>
  37#include <linux/compiler.h>
  38#include <linux/hugetlb.h>
  39#include <linux/frame.h>
  40
  41#include <asm/page.h>
  42#include <asm/sections.h>
  43
  44#include <crypto/hash.h>
  45#include <crypto/sha.h>
  46#include "kexec_internal.h"
  47
  48DEFINE_MUTEX(kexec_mutex);
  49
  50/* Per cpu memory for storing cpu states in case of system crash. */
  51note_buf_t __percpu *crash_notes;
  52
  53/* Flag to indicate we are going to kexec a new kernel */
  54bool kexec_in_progress = false;
  55
  56
  57/* Location of the reserved area for the crash kernel */
  58struct resource crashk_res = {
  59        .name  = "Crash kernel",
  60        .start = 0,
  61        .end   = 0,
  62        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  63        .desc  = IORES_DESC_CRASH_KERNEL
  64};
  65struct resource crashk_low_res = {
  66        .name  = "Crash kernel",
  67        .start = 0,
  68        .end   = 0,
  69        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  70        .desc  = IORES_DESC_CRASH_KERNEL
  71};
  72
  73int kexec_should_crash(struct task_struct *p)
  74{
  75        /*
  76         * If crash_kexec_post_notifiers is enabled, don't run
  77         * crash_kexec() here yet; it must be run after the panic
  78         * notifiers in panic().
  79         */
  80        if (crash_kexec_post_notifiers)
  81                return 0;
  82        /*
  83         * There are 4 panic() calls in the do_exit() path, each of which
  84         * corresponds to one of these 4 conditions.
  85         */
  86        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
  87                return 1;
  88        return 0;
  89}
  90
  91int kexec_crash_loaded(void)
  92{
  93        return !!kexec_crash_image;
  94}
  95EXPORT_SYMBOL_GPL(kexec_crash_loaded);
  96
  97/*
  98 * When kexec transitions to the new kernel there is a one-to-one
  99 * mapping between physical and virtual addresses.  On processors
 100 * where you can disable the MMU this is trivial and easy.  For
 101 * others it is still a simple, predictable page table to set up.
 102 *
 103 * In that environment kexec copies the new kernel to its final
 104 * resting place.  This means I can only support memory whose
 105 * physical address can fit in an unsigned long.  In particular
 106 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 107 * If the assembly stub has more restrictive requirements
 108 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 109 * defined more restrictively in <asm/kexec.h>.
 110 *
 111 * The code for the transition from the current kernel to
 112 * the new kernel is placed in the control_code_buffer, whose size
 113 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 114 * page of memory is necessary, but some architectures require more.
 115 * Because this memory must be identity mapped in the transition from
 116 * virtual to physical addresses it must live in the range
 117 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 118 * modifiable.
 119 *
 120 * The assembly stub in the control code buffer is passed a linked list
 121 * of descriptor pages detailing the source pages of the new kernel,
 122 * and the destination addresses of those source pages.  As this data
 123 * structure is not used in the context of the current OS, it must
 124 * be self-contained.
 125 *
 126 * The code has been made to work with highmem pages and will use a
 127 * destination page in its final resting place (if it happens
 128 * to allocate it).  The end product of this is that most of the
 129 * physical address space, and most of RAM can be used.
 130 *
 131 * Future directions include:
 132 *  - allocating a page table with the control code buffer identity
 133 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 134 *    reliable.
 135 */
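/*
 * Purely illustrative sketch (not used anywhere in the kernel): roughly how
 * the architecture's relocation stub consumes the entry list described
 * above once it runs from the control code buffer under an identity
 * mapping, so physical addresses can be dereferenced directly.  The real
 * stubs are hand-written, arch-specific assembly; the helper name below is
 * hypothetical.
 */
static void __maybe_unused example_walk_entry_list(kimage_entry_t *entry)
{
        unsigned long dest = 0;

        while (*entry && !(*entry & IND_DONE)) {
                unsigned long addr = *entry & PAGE_MASK;

                if (*entry & IND_DESTINATION) {
                        dest = addr;    /* following source pages land here */
                } else if (*entry & IND_INDIRECTION) {
                        /* continue with the next page of entries */
                        entry = (kimage_entry_t *)addr;
                        continue;
                } else if (*entry & IND_SOURCE) {
                        memcpy((void *)dest, (void *)addr, PAGE_SIZE);
                        dest += PAGE_SIZE;
                }
                entry++;
        }
}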
 136
 137/*
 138 * KIMAGE_NO_DEST is an impossible destination address, used for
 139 * allocating pages whose destination address we do not care about.
 140 */
 141#define KIMAGE_NO_DEST (-1UL)
 142#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
 143
 144static struct page *kimage_alloc_page(struct kimage *image,
 145                                       gfp_t gfp_mask,
 146                                       unsigned long dest);
 147
 148int sanity_check_segment_list(struct kimage *image)
 149{
 150        int i;
 151        unsigned long nr_segments = image->nr_segments;
 152        unsigned long total_pages = 0;
 153        unsigned long nr_pages = totalram_pages();
 154
 155        /*
 156         * Verify we have good destination addresses.  The caller is
 157         * responsible for making certain we don't attempt to load
 158         * the new image into invalid or reserved areas of RAM.  This
 159         * just verifies it is an address we can use.
 160         *
 161         * Since the kernel does everything in page-size chunks, ensure
 162         * the destination addresses are page aligned.  Too many
 163         * special cases crop up when we don't do this.  The most
 164         * insidious is getting overlapping destination addresses
 165         * simply because addresses are changed to page size
 166         * granularity.
 167         */
 168        for (i = 0; i < nr_segments; i++) {
 169                unsigned long mstart, mend;
 170
 171                mstart = image->segment[i].mem;
 172                mend   = mstart + image->segment[i].memsz;
 173                if (mstart > mend)
 174                        return -EADDRNOTAVAIL;
 175                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
 176                        return -EADDRNOTAVAIL;
 177                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
 178                        return -EADDRNOTAVAIL;
 179        }
 180
 181        /* Verify our destination addresses do not overlap.
 182         * If we allowed overlapping destination addresses
 183         * through, very weird things can happen with no
 184         * easy explanation as one segment stomps on another.
 185         */
 186        for (i = 0; i < nr_segments; i++) {
 187                unsigned long mstart, mend;
 188                unsigned long j;
 189
 190                mstart = image->segment[i].mem;
 191                mend   = mstart + image->segment[i].memsz;
 192                for (j = 0; j < i; j++) {
 193                        unsigned long pstart, pend;
 194
 195                        pstart = image->segment[j].mem;
 196                        pend   = pstart + image->segment[j].memsz;
 197                        /* Do the segments overlap ? */
 198                        if ((mend > pstart) && (mstart < pend))
 199                                return -EINVAL;
 200                }
 201        }
 202
 203        /* Ensure our buffer sizes do not exceed
 204         * our memory sizes.  This should always be the case,
 205         * and it is easier to check up front than to be surprised
 206         * later on.
 207         */
 208        for (i = 0; i < nr_segments; i++) {
 209                if (image->segment[i].bufsz > image->segment[i].memsz)
 210                        return -EINVAL;
 211        }
 212
 213        /*
 214         * Verify that no more than half of memory will be consumed. If the
 215         * request from userspace is too large, a large amount of time will be
 216         * wasted allocating pages, which can cause a soft lockup.
 217         */
 218        for (i = 0; i < nr_segments; i++) {
 219                if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
 220                        return -EINVAL;
 221
 222                total_pages += PAGE_COUNT(image->segment[i].memsz);
 223        }
 224
 225        if (total_pages > nr_pages / 2)
 226                return -EINVAL;
 227
 228        /*
 229         * Verify we have good destination addresses.  Normally
 230         * the caller is responsible for making certain we don't
 231         * attempt to load the new image into invalid or reserved
 232         * areas of RAM.  But crash kernels are preloaded into a
 233         * reserved area of RAM.  We must ensure the addresses
 234         * are in the reserved area, otherwise preloading the
 235         * kernel could corrupt things.
 236         */
 237
 238        if (image->type == KEXEC_TYPE_CRASH) {
 239                for (i = 0; i < nr_segments; i++) {
 240                        unsigned long mstart, mend;
 241
 242                        mstart = image->segment[i].mem;
 243                        mend = mstart + image->segment[i].memsz - 1;
 244                        /* Ensure we are within the crash kernel limits */
 245                        if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
 246                            (mend > phys_to_boot_phys(crashk_res.end)))
 247                                return -EADDRNOTAVAIL;
 248                }
 249        }
 250
 251        return 0;
 252}
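/*
 * Hedged illustration with made-up addresses: a two-segment list that would
 * satisfy the checks above -- page-aligned destinations and sizes,
 * bufsz <= memsz, no overlap, and far below half of RAM.  Real segment
 * layouts come from the kexec_load() caller or the kexec_file loaders.
 */
static const struct kexec_segment example_segments[] __maybe_unused = {
        { .buf = NULL, .bufsz = 0x3000, .mem = 0x01000000, .memsz = 0x4000 },
        { .buf = NULL, .bufsz = 0x0800, .mem = 0x01004000, .memsz = 0x1000 },
};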
 253
 254struct kimage *do_kimage_alloc_init(void)
 255{
 256        struct kimage *image;
 257
 258        /* Allocate a controlling structure */
 259        image = kzalloc(sizeof(*image), GFP_KERNEL);
 260        if (!image)
 261                return NULL;
 262
 263        image->head = 0;
 264        image->entry = &image->head;
 265        image->last_entry = &image->head;
 266        image->control_page = ~0; /* By default this does not apply */
 267        image->type = KEXEC_TYPE_DEFAULT;
 268
 269        /* Initialize the list of control pages */
 270        INIT_LIST_HEAD(&image->control_pages);
 271
 272        /* Initialize the list of destination pages */
 273        INIT_LIST_HEAD(&image->dest_pages);
 274
 275        /* Initialize the list of unusable pages */
 276        INIT_LIST_HEAD(&image->unusable_pages);
 277
 278        return image;
 279}
 280
 281int kimage_is_destination_range(struct kimage *image,
 282                                        unsigned long start,
 283                                        unsigned long end)
 284{
 285        unsigned long i;
 286
 287        for (i = 0; i < image->nr_segments; i++) {
 288                unsigned long mstart, mend;
 289
 290                mstart = image->segment[i].mem;
 291                mend = mstart + image->segment[i].memsz;
 292                if ((end > mstart) && (start < mend))
 293                        return 1;
 294        }
 295
 296        return 0;
 297}
 298
 299static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 300{
 301        struct page *pages;
 302
 303        if (fatal_signal_pending(current))
 304                return NULL;
 305        pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
 306        if (pages) {
 307                unsigned int count, i;
 308
 309                pages->mapping = NULL;
 310                set_page_private(pages, order);
 311                count = 1 << order;
 312                for (i = 0; i < count; i++)
 313                        SetPageReserved(pages + i);
 314
 315                arch_kexec_post_alloc_pages(page_address(pages), count,
 316                                            gfp_mask);
 317
 318                if (gfp_mask & __GFP_ZERO)
 319                        for (i = 0; i < count; i++)
 320                                clear_highpage(pages + i);
 321        }
 322
 323        return pages;
 324}
 325
 326static void kimage_free_pages(struct page *page)
 327{
 328        unsigned int order, count, i;
 329
 330        order = page_private(page);
 331        count = 1 << order;
 332
 333        arch_kexec_pre_free_pages(page_address(page), count);
 334
 335        for (i = 0; i < count; i++)
 336                ClearPageReserved(page + i);
 337        __free_pages(page, order);
 338}
 339
 340void kimage_free_page_list(struct list_head *list)
 341{
 342        struct page *page, *next;
 343
 344        list_for_each_entry_safe(page, next, list, lru) {
 345                list_del(&page->lru);
 346                kimage_free_pages(page);
 347        }
 348}
 349
 350static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 351                                                        unsigned int order)
 352{
 353        /* Control pages are special, they are the intermediaries
 354         * that are needed while we copy the rest of the pages
 355         * to their final resting place.  As such they must
 356         * not conflict with either the destination addresses
 357         * or memory the kernel is already using.
 358         *
 359         * The only case where we really need more than one of
 360         * these is for architectures where we cannot disable
 361         * the MMU and must instead generate an identity mapped
 362         * page table for all of the memory.
 363         *
 364         * At worst this runs in O(N) of the image size.
 365         */
 366        struct list_head extra_pages;
 367        struct page *pages;
 368        unsigned int count;
 369
 370        count = 1 << order;
 371        INIT_LIST_HEAD(&extra_pages);
 372
 373        /* Loop while I can allocate a page and the page allocated
 374         * is a destination page.
 375         */
 376        do {
 377                unsigned long pfn, epfn, addr, eaddr;
 378
 379                pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
 380                if (!pages)
 381                        break;
 382                pfn   = page_to_boot_pfn(pages);
 383                epfn  = pfn + count;
 384                addr  = pfn << PAGE_SHIFT;
 385                eaddr = epfn << PAGE_SHIFT;
 386                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
 387                              kimage_is_destination_range(image, addr, eaddr)) {
 388                        list_add(&pages->lru, &extra_pages);
 389                        pages = NULL;
 390                }
 391        } while (!pages);
 392
 393        if (pages) {
 394                /* Remember the allocated page... */
 395                list_add(&pages->lru, &image->control_pages);
 396
 397                /* Because the page is already in its destination
 398                 * location we will never allocate another page at
 399                 * that address.  Therefore kimage_alloc_pages
 400                 * will not return it (again) and we don't need
 401                 * to give it an entry in image->segment[].
 402                 */
 403        }
 404        /* Deal with the destination pages I have inadvertently allocated.
 405         *
 406         * Ideally I would convert multi-page allocations into single
 407         * page allocations, and add everything to image->dest_pages.
 408         *
 409         * For now it is simpler to just free the pages.
 410         */
 411        kimage_free_page_list(&extra_pages);
 412
 413        return pages;
 414}
 415
 416static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
 417                                                      unsigned int order)
 418{
 419        /* Control pages are special, they are the intermediaries
 420         * that are needed while we copy the rest of the pages
 421         * to their final resting place.  As such they must
 422         * not conflict with either the destination addresses
 423         * or memory the kernel is already using.
 424         *
 425         * Control pages are also the only pages we must allocate
 426         * when loading a crash kernel.  All of the other pages
 427         * are specified by the segments and we just memcpy
 428         * into them directly.
 429         *
 430         * The only case where we really need more than one of
 431         * these is for architectures where we cannot disable
 432         * the MMU and must instead generate an identity mapped
 433         * page table for all of the memory.
 434         *
 435         * Given the low demand this implements a very simple
 436         * allocator that finds the first hole of the appropriate
 437         * size in the reserved memory region, and allocates all
 438         * of the memory up to and including the hole.
 439         */
 440        unsigned long hole_start, hole_end, size;
 441        struct page *pages;
 442
 443        pages = NULL;
 444        size = (1 << order) << PAGE_SHIFT;
 445        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
 446        hole_end   = hole_start + size - 1;
 447        while (hole_end <= crashk_res.end) {
 448                unsigned long i;
 449
 450                cond_resched();
 451
 452                if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
 453                        break;
 454                /* See if I overlap any of the segments */
 455                for (i = 0; i < image->nr_segments; i++) {
 456                        unsigned long mstart, mend;
 457
 458                        mstart = image->segment[i].mem;
 459                        mend   = mstart + image->segment[i].memsz - 1;
 460                        if ((hole_end >= mstart) && (hole_start <= mend)) {
 461                                /* Advance the hole to the end of the segment */
 462                                hole_start = (mend + (size - 1)) & ~(size - 1);
 463                                hole_end   = hole_start + size - 1;
 464                                break;
 465                        }
 466                }
 467                /* If I don't overlap any segments I have found my hole! */
 468                if (i == image->nr_segments) {
 469                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
 470                        image->control_page = hole_end;
 471                        break;
 472                }
 473        }
 474
 475        /* Ensure that these pages are decrypted if SME is enabled. */
 476        if (pages)
 477                arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);
 478
 479        return pages;
 480}
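/*
 * Worked example of the hole search above (illustrative numbers, 4 KiB
 * pages): with order = 1 the allocation size is 0x2000, so a previous
 * image->control_page of 0x5800 rounds up to hole_start = 0x6000 with
 * hole_end = 0x7fff.  If that range overlaps a segment ending at 0x6fff,
 * hole_start advances to 0x8000 and the scan repeats until a free,
 * size-aligned hole inside crashk_res is found (or the limit is hit).
 */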
 481
 482
 483struct page *kimage_alloc_control_pages(struct kimage *image,
 484                                         unsigned int order)
 485{
 486        struct page *pages = NULL;
 487
 488        switch (image->type) {
 489        case KEXEC_TYPE_DEFAULT:
 490                pages = kimage_alloc_normal_control_pages(image, order);
 491                break;
 492        case KEXEC_TYPE_CRASH:
 493                pages = kimage_alloc_crash_control_pages(image, order);
 494                break;
 495        }
 496
 497        return pages;
 498}
 499
 500int kimage_crash_copy_vmcoreinfo(struct kimage *image)
 501{
 502        struct page *vmcoreinfo_page;
 503        void *safecopy;
 504
 505        if (image->type != KEXEC_TYPE_CRASH)
 506                return 0;
 507
 508        /*
 509         * For kdump, allocate one vmcoreinfo safe copy from the
 510         * crash memory.  Since arch_kexec_protect_crashkres() runs
 511         * after the kexec syscall, the copy is naturally protected
 512         * from write (even read) access under the kernel direct
 513         * mapping.  On the other hand, we still need to write it
 514         * when a crash happens, to generate the vmcoreinfo note,
 515         * so we rely on vmap for that purpose.
 516         */
 517        vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
 518        if (!vmcoreinfo_page) {
 519                pr_warn("Could not allocate vmcoreinfo buffer\n");
 520                return -ENOMEM;
 521        }
 522        safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
 523        if (!safecopy) {
 524                pr_warn("Could not vmap vmcoreinfo buffer\n");
 525                return -ENOMEM;
 526        }
 527
 528        image->vmcoreinfo_data_copy = safecopy;
 529        crash_update_vmcoreinfo_safecopy(safecopy);
 530
 531        return 0;
 532}
 533
 534static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 535{
 536        if (*image->entry != 0)
 537                image->entry++;
 538
 539        if (image->entry == image->last_entry) {
 540                kimage_entry_t *ind_page;
 541                struct page *page;
 542
 543                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
 544                if (!page)
 545                        return -ENOMEM;
 546
 547                ind_page = page_address(page);
 548                *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
 549                image->entry = ind_page;
 550                image->last_entry = ind_page +
 551                                      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
 552        }
 553        *image->entry = entry;
 554        image->entry++;
 555        *image->entry = 0;
 556
 557        return 0;
 558}
 559
 560static int kimage_set_destination(struct kimage *image,
 561                                   unsigned long destination)
 562{
 563        int result;
 564
 565        destination &= PAGE_MASK;
 566        result = kimage_add_entry(image, destination | IND_DESTINATION);
 567
 568        return result;
 569}
 570
 571
 572static int kimage_add_page(struct kimage *image, unsigned long page)
 573{
 574        int result;
 575
 576        page &= PAGE_MASK;
 577        result = kimage_add_entry(image, page | IND_SOURCE);
 578
 579        return result;
 580}
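/*
 * Illustrative result of the two helpers above (hypothetical addresses,
 * 4 KiB pages): loading a three-page segment destined for 0x01000000
 * appends
 *
 *      0x01000000 | IND_DESTINATION
 *      (src pfn 0 << PAGE_SHIFT) | IND_SOURCE
 *      (src pfn 1 << PAGE_SHIFT) | IND_SOURCE
 *      (src pfn 2 << PAGE_SHIFT) | IND_SOURCE
 *
 * and kimage_terminate() later stores IND_DONE in the final slot.  When a
 * page of entries fills up, kimage_add_entry() links a fresh page in with
 * an IND_INDIRECTION entry and continues there.
 */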
 581
 582
 583static void kimage_free_extra_pages(struct kimage *image)
 584{
 585        /* Walk through and free any extra destination pages I may have */
 586        kimage_free_page_list(&image->dest_pages);
 587
 588        /* Walk through and free any unusable pages I have cached */
 589        kimage_free_page_list(&image->unusable_pages);
 590
 591}
 592
 593int __weak machine_kexec_post_load(struct kimage *image)
 594{
 595        return 0;
 596}
 597
 598void kimage_terminate(struct kimage *image)
 599{
 600        if (*image->entry != 0)
 601                image->entry++;
 602
 603        *image->entry = IND_DONE;
 604}
 605
 606#define for_each_kimage_entry(image, ptr, entry) \
 607        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
 608                ptr = (entry & IND_INDIRECTION) ? \
 609                        boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
 610
 611static void kimage_free_entry(kimage_entry_t entry)
 612{
 613        struct page *page;
 614
 615        page = boot_pfn_to_page(entry >> PAGE_SHIFT);
 616        kimage_free_pages(page);
 617}
 618
 619void kimage_free(struct kimage *image)
 620{
 621        kimage_entry_t *ptr, entry;
 622        kimage_entry_t ind = 0;
 623
 624        if (!image)
 625                return;
 626
 627        if (image->vmcoreinfo_data_copy) {
 628                crash_update_vmcoreinfo_safecopy(NULL);
 629                vunmap(image->vmcoreinfo_data_copy);
 630        }
 631
 632        kimage_free_extra_pages(image);
 633        for_each_kimage_entry(image, ptr, entry) {
 634                if (entry & IND_INDIRECTION) {
 635                        /* Free the previous indirection page */
 636                        if (ind & IND_INDIRECTION)
 637                                kimage_free_entry(ind);
 638                        /* Save this indirection page until we are
 639                         * done with it.
 640                         */
 641                        ind = entry;
 642                } else if (entry & IND_SOURCE)
 643                        kimage_free_entry(entry);
 644        }
 645        /* Free the final indirection page */
 646        if (ind & IND_INDIRECTION)
 647                kimage_free_entry(ind);
 648
 649        /* Handle any machine specific cleanup */
 650        machine_kexec_cleanup(image);
 651
 652        /* Free the kexec control pages... */
 653        kimage_free_page_list(&image->control_pages);
 654
 655        /*
 656         * Free up any temporary buffers allocated.  This might be hit
 657         * if an error occurred much later, after buffer allocation.
 658         */
 659        if (image->file_mode)
 660                kimage_file_post_load_cleanup(image);
 661
 662        kfree(image);
 663}
 664
 665static kimage_entry_t *kimage_dst_used(struct kimage *image,
 666                                        unsigned long page)
 667{
 668        kimage_entry_t *ptr, entry;
 669        unsigned long destination = 0;
 670
 671        for_each_kimage_entry(image, ptr, entry) {
 672                if (entry & IND_DESTINATION)
 673                        destination = entry & PAGE_MASK;
 674                else if (entry & IND_SOURCE) {
 675                        if (page == destination)
 676                                return ptr;
 677                        destination += PAGE_SIZE;
 678                }
 679        }
 680
 681        return NULL;
 682}
 683
 684static struct page *kimage_alloc_page(struct kimage *image,
 685                                        gfp_t gfp_mask,
 686                                        unsigned long destination)
 687{
 688        /*
 689         * Here we implement safeguards to ensure that a source page
 690         * is not copied to its destination page before the data on
 691         * the destination page is no longer useful.
 692         *
 693         * To do this we maintain the invariant that a source page is
 694         * either its own destination page, or it is not a
 695         * destination page at all.
 696         *
 697         * That is slightly stronger than required, but it makes the
 698         * proof that no problems will occur trivial, and the
 699         * implementation is simple to verify.
 700         *
 701         * When allocating all pages normally this algorithm will run
 702         * in O(N) time, but in the worst case it will run in O(N^2)
 703         * time.   If the runtime is a problem the data structures can
 704         * be fixed.
 705         */
 706        struct page *page;
 707        unsigned long addr;
 708
 709        /*
 710         * Walk through the list of destination pages, and see if I
 711         * have a match.
 712         */
 713        list_for_each_entry(page, &image->dest_pages, lru) {
 714                addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 715                if (addr == destination) {
 716                        list_del(&page->lru);
 717                        return page;
 718                }
 719        }
 720        page = NULL;
 721        while (1) {
 722                kimage_entry_t *old;
 723
 724                /* Allocate a page, if we run out of memory give up */
 725                page = kimage_alloc_pages(gfp_mask, 0);
 726                if (!page)
 727                        return NULL;
 728                /* If the page cannot be used, file it away */
 729                if (page_to_boot_pfn(page) >
 730                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
 731                        list_add(&page->lru, &image->unusable_pages);
 732                        continue;
 733                }
 734                addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 735
 736                /* If it is the destination page we want, use it */
 737                if (addr == destination)
 738                        break;
 739
 740                /* If the page is not a destination page use it */
 741                if (!kimage_is_destination_range(image, addr,
 742                                                  addr + PAGE_SIZE))
 743                        break;
 744
 745                /*
 746                 * I know that the page is someone's destination page.
 747                 * See if there is already a source page for this
 748                 * destination page.  And if so swap the source pages.
 749                 */
 750                old = kimage_dst_used(image, addr);
 751                if (old) {
 752                        /* If so move it */
 753                        unsigned long old_addr;
 754                        struct page *old_page;
 755
 756                        old_addr = *old & PAGE_MASK;
 757                        old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
 758                        copy_highpage(page, old_page);
 759                        *old = addr | (*old & ~PAGE_MASK);
 760
 761                        /* The old page I have found cannot be a
 762                         * destination page, so return it if its
 763                         * gfp_flags honor the ones passed in.
 764                         */
 765                        if (!(gfp_mask & __GFP_HIGHMEM) &&
 766                            PageHighMem(old_page)) {
 767                                kimage_free_pages(old_page);
 768                                continue;
 769                        }
 770                        addr = old_addr;
 771                        page = old_page;
 772                        break;
 773                }
 774                /* Place the page on the destination list, to be used later */
 775                list_add(&page->lru, &image->dest_pages);
 776        }
 777
 778        return page;
 779}
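/*
 * Concrete (made-up) example of the swap above, assuming 4 KiB pages: we
 * need a source page for destination 0x2000 but the allocator returns the
 * page at 0x5000, which is already some segment's destination and has a
 * source page S (say at 0x9000) queued to be copied there.  We copy S into
 * the page at 0x5000, repoint that IND_SOURCE entry (found via
 * kimage_dst_used()) at 0x5000 so the page is now its own destination, and
 * reuse S -- which by the invariant cannot be anyone's destination -- as
 * the page returned for 0x2000.
 */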
 780
 781static int kimage_load_normal_segment(struct kimage *image,
 782                                         struct kexec_segment *segment)
 783{
 784        unsigned long maddr;
 785        size_t ubytes, mbytes;
 786        int result;
 787        unsigned char __user *buf = NULL;
 788        unsigned char *kbuf = NULL;
 789
 790        result = 0;
 791        if (image->file_mode)
 792                kbuf = segment->kbuf;
 793        else
 794                buf = segment->buf;
 795        ubytes = segment->bufsz;
 796        mbytes = segment->memsz;
 797        maddr = segment->mem;
 798
 799        result = kimage_set_destination(image, maddr);
 800        if (result < 0)
 801                goto out;
 802
 803        while (mbytes) {
 804                struct page *page;
 805                char *ptr;
 806                size_t uchunk, mchunk;
 807
 808                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
 809                if (!page) {
 810                        result  = -ENOMEM;
 811                        goto out;
 812                }
 813                result = kimage_add_page(image, page_to_boot_pfn(page)
 814                                                                << PAGE_SHIFT);
 815                if (result < 0)
 816                        goto out;
 817
 818                ptr = kmap(page);
 819                /* Start with a clear page */
 820                clear_page(ptr);
 821                ptr += maddr & ~PAGE_MASK;
 822                mchunk = min_t(size_t, mbytes,
 823                                PAGE_SIZE - (maddr & ~PAGE_MASK));
 824                uchunk = min(ubytes, mchunk);
 825
 826                /* For file based kexec, source pages are in kernel memory */
 827                if (image->file_mode)
 828                        memcpy(ptr, kbuf, uchunk);
 829                else
 830                        result = copy_from_user(ptr, buf, uchunk);
 831                kunmap(page);
 832                if (result) {
 833                        result = -EFAULT;
 834                        goto out;
 835                }
 836                ubytes -= uchunk;
 837                maddr  += mchunk;
 838                if (image->file_mode)
 839                        kbuf += mchunk;
 840                else
 841                        buf += mchunk;
 842                mbytes -= mchunk;
 843
 844                cond_resched();
 845        }
 846out:
 847        return result;
 848}
 849
 850static int kimage_load_crash_segment(struct kimage *image,
 851                                        struct kexec_segment *segment)
 852{
 853        /* For crash dump kernels we simply copy the data from
 854         * user space to its destination.
 855         * We do things a page at a time for the sake of kmap.
 856         */
 857        unsigned long maddr;
 858        size_t ubytes, mbytes;
 859        int result;
 860        unsigned char __user *buf = NULL;
 861        unsigned char *kbuf = NULL;
 862
 863        result = 0;
 864        if (image->file_mode)
 865                kbuf = segment->kbuf;
 866        else
 867                buf = segment->buf;
 868        ubytes = segment->bufsz;
 869        mbytes = segment->memsz;
 870        maddr = segment->mem;
 871        while (mbytes) {
 872                struct page *page;
 873                char *ptr;
 874                size_t uchunk, mchunk;
 875
 876                page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
 877                if (!page) {
 878                        result  = -ENOMEM;
 879                        goto out;
 880                }
 881                arch_kexec_post_alloc_pages(page_address(page), 1, 0);
 882                ptr = kmap(page);
 883                ptr += maddr & ~PAGE_MASK;
 884                mchunk = min_t(size_t, mbytes,
 885                                PAGE_SIZE - (maddr & ~PAGE_MASK));
 886                uchunk = min(ubytes, mchunk);
 887                if (mchunk > uchunk) {
 888                        /* Zero the trailing part of the page */
 889                        memset(ptr + uchunk, 0, mchunk - uchunk);
 890                }
 891
 892                /* For file based kexec, source pages are in kernel memory */
 893                if (image->file_mode)
 894                        memcpy(ptr, kbuf, uchunk);
 895                else
 896                        result = copy_from_user(ptr, buf, uchunk);
 897                kexec_flush_icache_page(page);
 898                kunmap(page);
 899                arch_kexec_pre_free_pages(page_address(page), 1);
 900                if (result) {
 901                        result = -EFAULT;
 902                        goto out;
 903                }
 904                ubytes -= uchunk;
 905                maddr  += mchunk;
 906                if (image->file_mode)
 907                        kbuf += mchunk;
 908                else
 909                        buf += mchunk;
 910                mbytes -= mchunk;
 911
 912                cond_resched();
 913        }
 914out:
 915        return result;
 916}
 917
 918int kimage_load_segment(struct kimage *image,
 919                                struct kexec_segment *segment)
 920{
 921        int result = -ENOMEM;
 922
 923        switch (image->type) {
 924        case KEXEC_TYPE_DEFAULT:
 925                result = kimage_load_normal_segment(image, segment);
 926                break;
 927        case KEXEC_TYPE_CRASH:
 928                result = kimage_load_crash_segment(image, segment);
 929                break;
 930        }
 931
 932        return result;
 933}
 934
 935struct kimage *kexec_image;
 936struct kimage *kexec_crash_image;
 937int kexec_load_disabled;
 938
 939/*
 940 * No panic_cpu check version of crash_kexec().  This function is called
 941 * only when panic_cpu holds the current CPU number; this is the only CPU
 942 * which processes crash_kexec routines.
 943 */
 944void __noclone __crash_kexec(struct pt_regs *regs)
 945{
 946        /* Take the kexec_mutex here to prevent sys_kexec_load
 947         * running on one cpu from replacing the crash kernel
 948         * we are using after a panic on a different cpu.
 949         *
 950         * If the crash kernel was not located in a fixed area
 951         * of memory the xchg(&kexec_crash_image) would be
 952         * sufficient.  But since I reuse the memory...
 953         */
 954        if (mutex_trylock(&kexec_mutex)) {
 955                if (kexec_crash_image) {
 956                        struct pt_regs fixed_regs;
 957
 958                        crash_setup_regs(&fixed_regs, regs);
 959                        crash_save_vmcoreinfo();
 960                        machine_crash_shutdown(&fixed_regs);
 961                        machine_kexec(kexec_crash_image);
 962                }
 963                mutex_unlock(&kexec_mutex);
 964        }
 965}
 966STACK_FRAME_NON_STANDARD(__crash_kexec);
 967
 968void crash_kexec(struct pt_regs *regs)
 969{
 970        int old_cpu, this_cpu;
 971
 972        /*
 973         * Only one CPU is allowed to execute the crash_kexec() code as with
 974         * panic().  Otherwise parallel calls of panic() and crash_kexec()
 975         * may stop each other.  To exclude them, we use panic_cpu here too.
 976         */
 977        this_cpu = raw_smp_processor_id();
 978        old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
 979        if (old_cpu == PANIC_CPU_INVALID) {
 980                /* This is the 1st CPU which comes here, so go ahead. */
 981                printk_safe_flush_on_panic();
 982                __crash_kexec(regs);
 983
 984                /*
 985                 * Reset panic_cpu to allow another panic()/crash_kexec()
 986                 * call.
 987                 */
 988                atomic_set(&panic_cpu, PANIC_CPU_INVALID);
 989        }
 990}
 991
 992size_t crash_get_memory_size(void)
 993{
 994        size_t size = 0;
 995
 996        mutex_lock(&kexec_mutex);
 997        if (crashk_res.end != crashk_res.start)
 998                size = resource_size(&crashk_res);
 999        mutex_unlock(&kexec_mutex);
1000        return size;
1001}
1002
1003void __weak crash_free_reserved_phys_range(unsigned long begin,
1004                                           unsigned long end)
1005{
1006        unsigned long addr;
1007
1008        for (addr = begin; addr < end; addr += PAGE_SIZE)
1009                free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
1010}
1011
1012int crash_shrink_memory(unsigned long new_size)
1013{
1014        int ret = 0;
1015        unsigned long start, end;
1016        unsigned long old_size;
1017        struct resource *ram_res;
1018
1019        mutex_lock(&kexec_mutex);
1020
1021        if (kexec_crash_image) {
1022                ret = -ENOENT;
1023                goto unlock;
1024        }
1025        start = crashk_res.start;
1026        end = crashk_res.end;
1027        old_size = (end == 0) ? 0 : end - start + 1;
1028        if (new_size >= old_size) {
1029                ret = (new_size == old_size) ? 0 : -EINVAL;
1030                goto unlock;
1031        }
1032
1033        ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1034        if (!ram_res) {
1035                ret = -ENOMEM;
1036                goto unlock;
1037        }
1038
1039        start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1040        end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1041
1042        crash_free_reserved_phys_range(end, crashk_res.end);
1043
1044        if ((start == end) && (crashk_res.parent != NULL))
1045                release_resource(&crashk_res);
1046
1047        ram_res->start = end;
1048        ram_res->end = crashk_res.end;
1049        ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
1050        ram_res->name = "System RAM";
1051
1052        crashk_res.end = end - 1;
1053
1054        insert_resource(&iomem_resource, ram_res);
1055
1056unlock:
1057        mutex_unlock(&kexec_mutex);
1058        return ret;
1059}
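/*
 * Worked example (hypothetical layout): with crashk_res spanning
 * 0x20000000-0x27ffffff (128 MiB), crash_shrink_memory(64 << 20) rounds the
 * new end up to KEXEC_CRASH_MEM_ALIGN, hands the pages from 0x24000000 up
 * to the old end back to the page allocator, trims crashk_res to end at
 * 0x23ffffff, and inserts a "System RAM" resource covering the released
 * range.
 */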
1060
1061void crash_save_cpu(struct pt_regs *regs, int cpu)
1062{
1063        struct elf_prstatus prstatus;
1064        u32 *buf;
1065
1066        if ((cpu < 0) || (cpu >= nr_cpu_ids))
1067                return;
1068
1069        /* Using ELF notes here is opportunistic.
1070         * I need a well defined structure format
1071         * for the data I pass, and I need tags
1072         * on the data to indicate what information I have
1073         * squirrelled away.  ELF notes happen to provide
1074         * all of that, so there is no need to invent something new.
1075         */
1076        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
1077        if (!buf)
1078                return;
1079        memset(&prstatus, 0, sizeof(prstatus));
1080        prstatus.pr_pid = current->pid;
1081        elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1082        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1083                              &prstatus, sizeof(prstatus));
1084        final_note(buf);
1085}
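/*
 * Sketch of what one per-cpu note looks like after crash_save_cpu(); field
 * names follow the standard ELF note header and the sizes are illustrative:
 *
 *      n_namesz = 5                            ("CORE" plus the NUL)
 *      n_descsz = sizeof(struct elf_prstatus)
 *      n_type   = NT_PRSTATUS
 *      name     = "CORE\0" padded to a 4-byte boundary
 *      desc     = the prstatus payload (pid and saved registers)
 *
 * final_note() then appends an all-zero header so readers of the note
 * buffer know where it ends.
 */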
1086
1087static int __init crash_notes_memory_init(void)
1088{
1089        /* Allocate memory for saving cpu registers. */
1090        size_t size, align;
1091
1092        /*
1093         * crash_notes could be allocated across 2 vmalloc pages when percpu
1094         * is vmalloc based.  vmalloc doesn't guarantee that 2 contiguous vmalloc
1095         * pages are also on 2 contiguous physical pages.  In this case the
1096         * 2nd part of crash_notes in 2nd page could be lost since only the
1097         * starting address and size of crash_notes are exported through sysfs.
1098         * Here round up the size of crash_notes to the nearest power of two
1099         * and pass it to __alloc_percpu as align value. This can make sure
1100         * crash_notes is allocated inside one physical page.
1101         */
1102        size = sizeof(note_buf_t);
1103        align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
1104
1105        /*
1106         * Break compile if size is bigger than PAGE_SIZE since crash_notes
1107         * definitely will be in 2 pages with that.
1108         */
1109        BUILD_BUG_ON(size > PAGE_SIZE);
1110
1111        crash_notes = __alloc_percpu(size, align);
1112        if (!crash_notes) {
1113                pr_warn("Memory allocation for saving cpu register states failed\n");
1114                return -ENOMEM;
1115        }
1116        return 0;
1117}
1118subsys_initcall(crash_notes_memory_init);
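/*
 * Worked example of the sizing above: if sizeof(note_buf_t) were, say, 428
 * bytes, the align passed to __alloc_percpu() would be
 * roundup_pow_of_two(428) = 512, so the per-cpu buffer can never straddle a
 * page boundary; a note_buf_t larger than PAGE_SIZE trips the
 * BUILD_BUG_ON() instead.
 */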
1119
1120
1121/*
1122 * Move into place and start executing a preloaded standalone
1123 * executable.  If nothing was preloaded return an error.
1124 */
1125int kernel_kexec(void)
1126{
1127        int error = 0;
1128
1129        if (!mutex_trylock(&kexec_mutex))
1130                return -EBUSY;
1131        if (!kexec_image) {
1132                error = -EINVAL;
1133                goto Unlock;
1134        }
1135
1136#ifdef CONFIG_KEXEC_JUMP
1137        if (kexec_image->preserve_context) {
1138                lock_system_sleep();
1139                pm_prepare_console();
1140                error = freeze_processes();
1141                if (error) {
1142                        error = -EBUSY;
1143                        goto Restore_console;
1144                }
1145                suspend_console();
1146                error = dpm_suspend_start(PMSG_FREEZE);
1147                if (error)
1148                        goto Resume_console;
1149                /* At this point, dpm_suspend_start() has been called,
1150                 * but *not* dpm_suspend_end(). We *must* call
1151                 * dpm_suspend_end() now.  Otherwise, drivers for
1152                 * some devices (e.g. interrupt controllers) become
1153                 * desynchronized with the actual state of the
1154                 * hardware at resume time, and evil weirdness ensues.
1155                 */
1156                error = dpm_suspend_end(PMSG_FREEZE);
1157                if (error)
1158                        goto Resume_devices;
1159                error = suspend_disable_secondary_cpus();
1160                if (error)
1161                        goto Enable_cpus;
1162                local_irq_disable();
1163                error = syscore_suspend();
1164                if (error)
1165                        goto Enable_irqs;
1166        } else
1167#endif
1168        {
1169                kexec_in_progress = true;
1170                kernel_restart_prepare(NULL);
1171                migrate_to_reboot_cpu();
1172
1173                /*
1174                 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1175                 * no further code needs to use CPU hotplug (which is true in
1176                 * the reboot case). However, the kexec path depends on using
1177                 * CPU hotplug again; so re-enable it here.
1178                 */
1179                cpu_hotplug_enable();
1180                pr_notice("Starting new kernel\n");
1181                machine_shutdown();
1182        }
1183
1184        machine_kexec(kexec_image);
1185
1186#ifdef CONFIG_KEXEC_JUMP
1187        if (kexec_image->preserve_context) {
1188                syscore_resume();
1189 Enable_irqs:
1190                local_irq_enable();
1191 Enable_cpus:
1192                suspend_enable_secondary_cpus();
1193                dpm_resume_start(PMSG_RESTORE);
1194 Resume_devices:
1195                dpm_resume_end(PMSG_RESTORE);
1196 Resume_console:
1197                resume_console();
1198                thaw_processes();
1199 Restore_console:
1200                pm_restore_console();
1201                unlock_system_sleep();
1202        }
1203#endif
1204
1205 Unlock:
1206        mutex_unlock(&kexec_mutex);
1207        return error;
1208}
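/*
 * Hedged usage sketch (userspace, illustrative only -- hence the #if 0):
 * the usual path into kernel_kexec() is a kexec_load() or kexec_file_load()
 * syscall to stage the image, followed by reboot(RB_KEXEC).  Assumes a libc
 * that exposes SYS_kexec_file_load and RB_KEXEC.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/reboot.h>
#include <sys/syscall.h>

static int example_kexec_reboot(int kernel_fd, int initrd_fd, const char *cmdline)
{
        /* Stage the new kernel; on success the kernel sets kexec_image. */
        if (syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
                    strlen(cmdline) + 1, cmdline, 0UL) == -1)
                return -1;

        /* Jump into the staged kernel via kernel_kexec() above. */
        return reboot(RB_KEXEC);
}
#endif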
1209
1210/*
1211 * Protection mechanism for crashkernel reserved memory after
1212 * the kdump kernel is loaded.
1213 *
1214 * Provide an empty default implementation here -- architecture
1215 * code may override this
1216 */
1217void __weak arch_kexec_protect_crashkres(void)
1218{}
1219
1220void __weak arch_kexec_unprotect_crashkres(void)
1221{}
1222