linux/kernel/kexec_core.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * kexec.c - kexec system call core code.
   4 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/capability.h>
  10#include <linux/mm.h>
  11#include <linux/file.h>
  12#include <linux/slab.h>
  13#include <linux/fs.h>
  14#include <linux/kexec.h>
  15#include <linux/mutex.h>
  16#include <linux/list.h>
  17#include <linux/highmem.h>
  18#include <linux/syscalls.h>
  19#include <linux/reboot.h>
  20#include <linux/ioport.h>
  21#include <linux/hardirq.h>
  22#include <linux/elf.h>
  23#include <linux/elfcore.h>
  24#include <linux/utsname.h>
  25#include <linux/numa.h>
  26#include <linux/suspend.h>
  27#include <linux/device.h>
  28#include <linux/freezer.h>
  29#include <linux/panic_notifier.h>
  30#include <linux/pm.h>
  31#include <linux/cpu.h>
  32#include <linux/uaccess.h>
  33#include <linux/io.h>
  34#include <linux/console.h>
  35#include <linux/vmalloc.h>
  36#include <linux/swap.h>
  37#include <linux/syscore_ops.h>
  38#include <linux/compiler.h>
  39#include <linux/hugetlb.h>
  40#include <linux/objtool.h>
  41#include <linux/kmsg_dump.h>
  42
  43#include <asm/page.h>
  44#include <asm/sections.h>
  45
  46#include <crypto/hash.h>
  47#include "kexec_internal.h"
  48
  49DEFINE_MUTEX(kexec_mutex);
  50
  51/* Per cpu memory for storing cpu states in case of system crash. */
  52note_buf_t __percpu *crash_notes;
  53
  54/* Flag to indicate we are going to kexec a new kernel */
  55bool kexec_in_progress = false;
  56
  57
  58/* Location of the reserved area for the crash kernel */
  59struct resource crashk_res = {
  60        .name  = "Crash kernel",
  61        .start = 0,
  62        .end   = 0,
  63        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  64        .desc  = IORES_DESC_CRASH_KERNEL
  65};
  66struct resource crashk_low_res = {
  67        .name  = "Crash kernel",
  68        .start = 0,
  69        .end   = 0,
  70        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  71        .desc  = IORES_DESC_CRASH_KERNEL
  72};
  73
  74int kexec_should_crash(struct task_struct *p)
  75{
  76        /*
  77         * If crash_kexec_post_notifiers is enabled, don't run
  78         * crash_kexec() here yet, which must be run after panic
  79         * notifiers in panic().
  80         */
  81        if (crash_kexec_post_notifiers)
  82                return 0;
  83        /*
   84         * There are 4 panic() calls in the do_exit() path, one for
   85         * each of these 4 conditions.
  86         */
  87        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
  88                return 1;
  89        return 0;
  90}
  91
  92int kexec_crash_loaded(void)
  93{
  94        return !!kexec_crash_image;
  95}
  96EXPORT_SYMBOL_GPL(kexec_crash_loaded);
  97
  98/*
  99 * When kexec transitions to the new kernel there is a one-to-one
 100 * mapping between physical and virtual addresses.  On processors
  101 * where you can disable the MMU this is trivial and easy.  For
 102 * others it is still a simple predictable page table to setup.
 103 *
 104 * In that environment kexec copies the new kernel to its final
 105 * resting place.  This means I can only support memory whose
 106 * physical address can fit in an unsigned long.  In particular
 107 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 108 * If the assembly stub has more restrictive requirements
 109 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 110 * defined more restrictively in <asm/kexec.h>.
 111 *
 112 * The code for the transition from the current kernel to the
 113 * new kernel is placed in the control_code_buffer, whose size
 114 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 115 * page of memory is necessary, but some architectures require more.
 116 * Because this memory must be identity mapped in the transition from
 117 * virtual to physical addresses it must live in the range
 118 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 119 * modifiable.
 120 *
 121 * The assembly stub in the control code buffer is passed a linked list
 122 * of descriptor pages detailing the source pages of the new kernel,
 123 * and the destination addresses of those source pages.  As this data
 124 * structure is not used in the context of the current OS, it must
 125 * be self-contained.
 126 *
 127 * The code has been made to work with highmem pages and will use a
 128 * destination page in its final resting place (if it happens
 129 * to allocate it).  The end product of this is that most of the
 130 * physical address space, and most of RAM can be used.
 131 *
 132 * Future directions include:
 133 *  - allocating a page table with the control code buffer identity
 134 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 135 *    reliable.
 136 */
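/*
 * Rough sketch of the descriptor list mentioned above (built by
 * kimage_add_entry() and friends below).  It is a flat stream of
 * kimage_entry_t values, each a page-aligned physical address with
 * flag bits or-ed into the low bits, e.g. for one segment:
 *
 *   IND_DESTINATION | dest      set the destination cursor
 *   IND_SOURCE      | src0      copy this page to the cursor, advance it
 *   IND_SOURCE      | src1      ...one entry per source page...
 *   IND_INDIRECTION | next      keep reading entries from the page at next
 *   IND_DONE                    end of the list
 *
 * The layout shown is illustrative; see kimage_set_destination(),
 * kimage_add_page(), kimage_add_entry() and kimage_terminate() for the
 * code that actually emits these entries.
 */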
 137
 138/*
  139 * KIMAGE_NO_DEST is an impossible destination address, used when
 140 * allocating pages whose destination address we do not care about.
 141 */
 142#define KIMAGE_NO_DEST (-1UL)
 143#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
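/*
 * For example (assuming 4 KiB pages), PAGE_COUNT(0x2800) for a 10 KiB
 * segment evaluates to 3: the size is rounded up to whole pages before
 * the shift.
 */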
 144
 145static struct page *kimage_alloc_page(struct kimage *image,
 146                                       gfp_t gfp_mask,
 147                                       unsigned long dest);
 148
 149int sanity_check_segment_list(struct kimage *image)
 150{
 151        int i;
 152        unsigned long nr_segments = image->nr_segments;
 153        unsigned long total_pages = 0;
 154        unsigned long nr_pages = totalram_pages();
 155
 156        /*
 157         * Verify we have good destination addresses.  The caller is
 158         * responsible for making certain we don't attempt to load
 159         * the new image into invalid or reserved areas of RAM.  This
 160         * just verifies it is an address we can use.
 161         *
 162         * Since the kernel does everything in page size chunks ensure
 163         * the destination addresses are page aligned.  Too many
  164         * special cases crop up when we don't do this.  The most
 165         * insidious is getting overlapping destination addresses
 166         * simply because addresses are changed to page size
 167         * granularity.
 168         */
 169        for (i = 0; i < nr_segments; i++) {
 170                unsigned long mstart, mend;
 171
 172                mstart = image->segment[i].mem;
 173                mend   = mstart + image->segment[i].memsz;
 174                if (mstart > mend)
 175                        return -EADDRNOTAVAIL;
 176                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
 177                        return -EADDRNOTAVAIL;
 178                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
 179                        return -EADDRNOTAVAIL;
 180        }
 181
 182        /* Verify our destination addresses do not overlap.
  183         * If we allowed overlapping destination addresses
  184         * through, very weird things can happen with no
  185         * easy explanation as one segment stomps on another.
 186         */
 187        for (i = 0; i < nr_segments; i++) {
 188                unsigned long mstart, mend;
 189                unsigned long j;
 190
 191                mstart = image->segment[i].mem;
 192                mend   = mstart + image->segment[i].memsz;
 193                for (j = 0; j < i; j++) {
 194                        unsigned long pstart, pend;
 195
 196                        pstart = image->segment[j].mem;
 197                        pend   = pstart + image->segment[j].memsz;
 198                        /* Do the segments overlap ? */
 199                        if ((mend > pstart) && (mstart < pend))
 200                                return -EINVAL;
 201                }
 202        }
 203
 204        /* Ensure our buffer sizes are strictly less than
 205         * our memory sizes.  This should always be the case,
 206         * and it is easier to check up front than to be surprised
 207         * later on.
 208         */
 209        for (i = 0; i < nr_segments; i++) {
 210                if (image->segment[i].bufsz > image->segment[i].memsz)
 211                        return -EINVAL;
 212        }
 213
 214        /*
 215         * Verify that no more than half of memory will be consumed. If the
 216         * request from userspace is too large, a large amount of time will be
 217         * wasted allocating pages, which can cause a soft lockup.
 218         */
 219        for (i = 0; i < nr_segments; i++) {
 220                if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
 221                        return -EINVAL;
 222
 223                total_pages += PAGE_COUNT(image->segment[i].memsz);
 224        }
 225
 226        if (total_pages > nr_pages / 2)
 227                return -EINVAL;
 228
 229        /*
 230         * Verify we have good destination addresses.  Normally
 231         * the caller is responsible for making certain we don't
 232         * attempt to load the new image into invalid or reserved
 233         * areas of RAM.  But crash kernels are preloaded into a
  234         * reserved area of RAM.  We must ensure the addresses
 235         * are in the reserved area otherwise preloading the
 236         * kernel could corrupt things.
 237         */
 238
 239        if (image->type == KEXEC_TYPE_CRASH) {
 240                for (i = 0; i < nr_segments; i++) {
 241                        unsigned long mstart, mend;
 242
 243                        mstart = image->segment[i].mem;
 244                        mend = mstart + image->segment[i].memsz - 1;
 245                        /* Ensure we are within the crash kernel limits */
 246                        if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
 247                            (mend > phys_to_boot_phys(crashk_res.end)))
 248                                return -EADDRNOTAVAIL;
 249                }
 250        }
 251
 252        return 0;
 253}
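/*
 * The overlap test above treats each segment as the half-open interval
 * [mstart, mend): two segments collide iff mend > pstart and mstart < pend.
 * For example (addresses illustrative), [0x1000, 0x3000) and
 * [0x2000, 0x4000) overlap, while [0x1000, 0x2000) and [0x2000, 0x3000)
 * merely touch and are accepted.
 */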
 254
 255struct kimage *do_kimage_alloc_init(void)
 256{
 257        struct kimage *image;
 258
 259        /* Allocate a controlling structure */
 260        image = kzalloc(sizeof(*image), GFP_KERNEL);
 261        if (!image)
 262                return NULL;
 263
 264        image->head = 0;
 265        image->entry = &image->head;
 266        image->last_entry = &image->head;
 267        image->control_page = ~0; /* By default this does not apply */
 268        image->type = KEXEC_TYPE_DEFAULT;
 269
 270        /* Initialize the list of control pages */
 271        INIT_LIST_HEAD(&image->control_pages);
 272
 273        /* Initialize the list of destination pages */
 274        INIT_LIST_HEAD(&image->dest_pages);
 275
 276        /* Initialize the list of unusable pages */
 277        INIT_LIST_HEAD(&image->unusable_pages);
 278
 279        return image;
 280}
 281
 282int kimage_is_destination_range(struct kimage *image,
 283                                        unsigned long start,
 284                                        unsigned long end)
 285{
 286        unsigned long i;
 287
 288        for (i = 0; i < image->nr_segments; i++) {
 289                unsigned long mstart, mend;
 290
 291                mstart = image->segment[i].mem;
 292                mend = mstart + image->segment[i].memsz;
 293                if ((end > mstart) && (start < mend))
 294                        return 1;
 295        }
 296
 297        return 0;
 298}
 299
 300static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 301{
 302        struct page *pages;
 303
 304        if (fatal_signal_pending(current))
 305                return NULL;
 306        pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
 307        if (pages) {
 308                unsigned int count, i;
 309
 310                pages->mapping = NULL;
 311                set_page_private(pages, order);
 312                count = 1 << order;
 313                for (i = 0; i < count; i++)
 314                        SetPageReserved(pages + i);
 315
 316                arch_kexec_post_alloc_pages(page_address(pages), count,
 317                                            gfp_mask);
 318
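                /*
                 * Zeroing is done by hand here rather than via __GFP_ZERO
                 * (masked off above) so that it happens after
                 * arch_kexec_post_alloc_pages() has put the pages into
                 * their final mapping state (for example, marked
                 * unencrypted when SME is active), and the zeros are
                 * written through the mapping the pages will be used with.
                 */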
 319                if (gfp_mask & __GFP_ZERO)
 320                        for (i = 0; i < count; i++)
 321                                clear_highpage(pages + i);
 322        }
 323
 324        return pages;
 325}
 326
 327static void kimage_free_pages(struct page *page)
 328{
 329        unsigned int order, count, i;
 330
 331        order = page_private(page);
 332        count = 1 << order;
 333
 334        arch_kexec_pre_free_pages(page_address(page), count);
 335
 336        for (i = 0; i < count; i++)
 337                ClearPageReserved(page + i);
 338        __free_pages(page, order);
 339}
 340
 341void kimage_free_page_list(struct list_head *list)
 342{
 343        struct page *page, *next;
 344
 345        list_for_each_entry_safe(page, next, list, lru) {
 346                list_del(&page->lru);
 347                kimage_free_pages(page);
 348        }
 349}
 350
 351static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 352                                                        unsigned int order)
 353{
 354        /* Control pages are special, they are the intermediaries
 355         * that are needed while we copy the rest of the pages
 356         * to their final resting place.  As such they must
 357         * not conflict with either the destination addresses
 358         * or memory the kernel is already using.
 359         *
 360         * The only case where we really need more than one of
  361         * these is for architectures where we cannot disable
 362         * the MMU and must instead generate an identity mapped
 363         * page table for all of the memory.
 364         *
 365         * At worst this runs in O(N) of the image size.
 366         */
 367        struct list_head extra_pages;
 368        struct page *pages;
 369        unsigned int count;
 370
 371        count = 1 << order;
 372        INIT_LIST_HEAD(&extra_pages);
 373
 374        /* Loop while I can allocate a page and the page allocated
 375         * is a destination page.
 376         */
 377        do {
 378                unsigned long pfn, epfn, addr, eaddr;
 379
 380                pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
 381                if (!pages)
 382                        break;
 383                pfn   = page_to_boot_pfn(pages);
 384                epfn  = pfn + count;
 385                addr  = pfn << PAGE_SHIFT;
 386                eaddr = epfn << PAGE_SHIFT;
 387                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
 388                              kimage_is_destination_range(image, addr, eaddr)) {
 389                        list_add(&pages->lru, &extra_pages);
 390                        pages = NULL;
 391                }
 392        } while (!pages);
 393
 394        if (pages) {
 395                /* Remember the allocated page... */
 396                list_add(&pages->lru, &image->control_pages);
 397
  398                /* Because the page is already in its destination
 399                 * location we will never allocate another page at
 400                 * that address.  Therefore kimage_alloc_pages
 401                 * will not return it (again) and we don't need
 402                 * to give it an entry in image->segment[].
 403                 */
 404        }
 405        /* Deal with the destination pages I have inadvertently allocated.
 406         *
 407         * Ideally I would convert multi-page allocations into single
 408         * page allocations, and add everything to image->dest_pages.
 409         *
 410         * For now it is simpler to just free the pages.
 411         */
 412        kimage_free_page_list(&extra_pages);
 413
 414        return pages;
 415}
 416
 417static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
 418                                                      unsigned int order)
 419{
 420        /* Control pages are special, they are the intermediaries
 421         * that are needed while we copy the rest of the pages
 422         * to their final resting place.  As such they must
 423         * not conflict with either the destination addresses
 424         * or memory the kernel is already using.
 425         *
  426         * Control pages are also the only pages we must allocate
 427         * when loading a crash kernel.  All of the other pages
 428         * are specified by the segments and we just memcpy
 429         * into them directly.
 430         *
 431         * The only case where we really need more than one of
  432         * these is for architectures where we cannot disable
 433         * the MMU and must instead generate an identity mapped
 434         * page table for all of the memory.
 435         *
 436         * Given the low demand this implements a very simple
 437         * allocator that finds the first hole of the appropriate
 438         * size in the reserved memory region, and allocates all
 439         * of the memory up to and including the hole.
 440         */
 441        unsigned long hole_start, hole_end, size;
 442        struct page *pages;
 443
 444        pages = NULL;
 445        size = (1 << order) << PAGE_SHIFT;
 446        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
 447        hole_end   = hole_start + size - 1;
 448        while (hole_end <= crashk_res.end) {
 449                unsigned long i;
 450
 451                cond_resched();
 452
 453                if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
 454                        break;
 455                /* See if I overlap any of the segments */
 456                for (i = 0; i < image->nr_segments; i++) {
 457                        unsigned long mstart, mend;
 458
 459                        mstart = image->segment[i].mem;
 460                        mend   = mstart + image->segment[i].memsz - 1;
 461                        if ((hole_end >= mstart) && (hole_start <= mend)) {
 462                                /* Advance the hole to the end of the segment */
 463                                hole_start = (mend + (size - 1)) & ~(size - 1);
 464                                hole_end   = hole_start + size - 1;
 465                                break;
 466                        }
 467                }
 468                /* If I don't overlap any segments I have found my hole! */
 469                if (i == image->nr_segments) {
 470                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
 471                        image->control_page = hole_end;
 472                        break;
 473                }
 474        }
 475
 476        /* Ensure that these pages are decrypted if SME is enabled. */
 477        if (pages)
 478                arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);
 479
 480        return pages;
 481}
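/*
 * Illustrative walk-through of the hole search above: with order = 1 and
 * 4 KiB pages the window is 8 KiB wide.  Starting from image->control_page
 * rounded up to an 8 KiB boundary, the window slides forward through the
 * crashkernel region; whenever it intersects an already-declared segment it
 * jumps to the first aligned address past that segment, and the first
 * window that overlaps no segment (and still fits below crashk_res.end and
 * the control memory limit) becomes the control area.
 */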
 482
 483
 484struct page *kimage_alloc_control_pages(struct kimage *image,
 485                                         unsigned int order)
 486{
 487        struct page *pages = NULL;
 488
 489        switch (image->type) {
 490        case KEXEC_TYPE_DEFAULT:
 491                pages = kimage_alloc_normal_control_pages(image, order);
 492                break;
 493        case KEXEC_TYPE_CRASH:
 494                pages = kimage_alloc_crash_control_pages(image, order);
 495                break;
 496        }
 497
 498        return pages;
 499}
 500
 501int kimage_crash_copy_vmcoreinfo(struct kimage *image)
 502{
 503        struct page *vmcoreinfo_page;
 504        void *safecopy;
 505
 506        if (image->type != KEXEC_TYPE_CRASH)
 507                return 0;
 508
 509        /*
  510         * For kdump, allocate one vmcoreinfo safe copy from the
  511         * crash memory. Since arch_kexec_protect_crashkres() runs
  512         * after the kexec syscall, the copy is naturally protected
  513         * from write (even read) access under the kernel direct
  514         * mapping. On the other hand, we still need to access it
  515         * when a crash happens in order to generate the vmcoreinfo
  516         * note, hence we rely on vmap for this purpose.
 517         */
 518        vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
 519        if (!vmcoreinfo_page) {
 520                pr_warn("Could not allocate vmcoreinfo buffer\n");
 521                return -ENOMEM;
 522        }
 523        safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
 524        if (!safecopy) {
 525                pr_warn("Could not vmap vmcoreinfo buffer\n");
 526                return -ENOMEM;
 527        }
 528
 529        image->vmcoreinfo_data_copy = safecopy;
 530        crash_update_vmcoreinfo_safecopy(safecopy);
 531
 532        return 0;
 533}
 534
 535static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 536{
 537        if (*image->entry != 0)
 538                image->entry++;
 539
 540        if (image->entry == image->last_entry) {
 541                kimage_entry_t *ind_page;
 542                struct page *page;
 543
 544                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
 545                if (!page)
 546                        return -ENOMEM;
 547
 548                ind_page = page_address(page);
 549                *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
 550                image->entry = ind_page;
 551                image->last_entry = ind_page +
 552                                      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
 553        }
 554        *image->entry = entry;
 555        image->entry++;
 556        *image->entry = 0;
 557
 558        return 0;
 559}
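/*
 * Illustrative arithmetic for the indirection handling above: with 4 KiB
 * pages and an 8-byte kimage_entry_t an indirection page holds 512 slots.
 * When the cursor reaches the final slot, that slot receives the physical
 * address of a freshly allocated page or-ed with IND_INDIRECTION, and
 * entries continue in the new page, so the assembly stub can follow the
 * chain without any help from the old kernel.
 */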
 560
 561static int kimage_set_destination(struct kimage *image,
 562                                   unsigned long destination)
 563{
 564        int result;
 565
 566        destination &= PAGE_MASK;
 567        result = kimage_add_entry(image, destination | IND_DESTINATION);
 568
 569        return result;
 570}
 571
 572
 573static int kimage_add_page(struct kimage *image, unsigned long page)
 574{
 575        int result;
 576
 577        page &= PAGE_MASK;
 578        result = kimage_add_entry(image, page | IND_SOURCE);
 579
 580        return result;
 581}
 582
 583
 584static void kimage_free_extra_pages(struct kimage *image)
 585{
 586        /* Walk through and free any extra destination pages I may have */
 587        kimage_free_page_list(&image->dest_pages);
 588
 589        /* Walk through and free any unusable pages I have cached */
 590        kimage_free_page_list(&image->unusable_pages);
 591
 592}
 593
 594int __weak machine_kexec_post_load(struct kimage *image)
 595{
 596        return 0;
 597}
 598
 599void kimage_terminate(struct kimage *image)
 600{
 601        if (*image->entry != 0)
 602                image->entry++;
 603
 604        *image->entry = IND_DONE;
 605}
 606
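/*
 * Walk every entry in the stream starting at image->head.  When the
 * current entry carries IND_INDIRECTION the walk continues at the start
 * of the page that entry points to; it stops at the first zero or
 * IND_DONE entry.
 */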
 607#define for_each_kimage_entry(image, ptr, entry) \
 608        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
 609                ptr = (entry & IND_INDIRECTION) ? \
 610                        boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
 611
 612static void kimage_free_entry(kimage_entry_t entry)
 613{
 614        struct page *page;
 615
 616        page = boot_pfn_to_page(entry >> PAGE_SHIFT);
 617        kimage_free_pages(page);
 618}
 619
 620void kimage_free(struct kimage *image)
 621{
 622        kimage_entry_t *ptr, entry;
 623        kimage_entry_t ind = 0;
 624
 625        if (!image)
 626                return;
 627
 628        if (image->vmcoreinfo_data_copy) {
 629                crash_update_vmcoreinfo_safecopy(NULL);
 630                vunmap(image->vmcoreinfo_data_copy);
 631        }
 632
 633        kimage_free_extra_pages(image);
 634        for_each_kimage_entry(image, ptr, entry) {
 635                if (entry & IND_INDIRECTION) {
 636                        /* Free the previous indirection page */
 637                        if (ind & IND_INDIRECTION)
 638                                kimage_free_entry(ind);
 639                        /* Save this indirection page until we are
 640                         * done with it.
 641                         */
 642                        ind = entry;
 643                } else if (entry & IND_SOURCE)
 644                        kimage_free_entry(entry);
 645        }
 646        /* Free the final indirection page */
 647        if (ind & IND_INDIRECTION)
 648                kimage_free_entry(ind);
 649
 650        /* Handle any machine specific cleanup */
 651        machine_kexec_cleanup(image);
 652
 653        /* Free the kexec control pages... */
 654        kimage_free_page_list(&image->control_pages);
 655
 656        /*
  657         * Free up any temporary buffers allocated. This path is hit
  658         * if an error occurred well after the buffers were allocated.
 659         */
 660        if (image->file_mode)
 661                kimage_file_post_load_cleanup(image);
 662
 663        kfree(image);
 664}
 665
 666static kimage_entry_t *kimage_dst_used(struct kimage *image,
 667                                        unsigned long page)
 668{
 669        kimage_entry_t *ptr, entry;
 670        unsigned long destination = 0;
 671
 672        for_each_kimage_entry(image, ptr, entry) {
 673                if (entry & IND_DESTINATION)
 674                        destination = entry & PAGE_MASK;
 675                else if (entry & IND_SOURCE) {
 676                        if (page == destination)
 677                                return ptr;
 678                        destination += PAGE_SIZE;
 679                }
 680        }
 681
 682        return NULL;
 683}
 684
 685static struct page *kimage_alloc_page(struct kimage *image,
 686                                        gfp_t gfp_mask,
 687                                        unsigned long destination)
 688{
 689        /*
 690         * Here we implement safeguards to ensure that a source page
 691         * is not copied to its destination page before the data on
 692         * the destination page is no longer useful.
 693         *
 694         * To do this we maintain the invariant that a source page is
 695         * either its own destination page, or it is not a
 696         * destination page at all.
 697         *
 698         * That is slightly stronger than required, but the proof
  699         * that no problems will occur is trivial, and the
  700         * implementation is simple to verify.
 701         *
 702         * When allocating all pages normally this algorithm will run
 703         * in O(N) time, but in the worst case it will run in O(N^2)
 704         * time.   If the runtime is a problem the data structures can
 705         * be fixed.
 706         */
 707        struct page *page;
 708        unsigned long addr;
 709
 710        /*
 711         * Walk through the list of destination pages, and see if I
 712         * have a match.
 713         */
 714        list_for_each_entry(page, &image->dest_pages, lru) {
 715                addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 716                if (addr == destination) {
 717                        list_del(&page->lru);
 718                        return page;
 719                }
 720        }
 721        page = NULL;
 722        while (1) {
 723                kimage_entry_t *old;
 724
 725                /* Allocate a page, if we run out of memory give up */
 726                page = kimage_alloc_pages(gfp_mask, 0);
 727                if (!page)
 728                        return NULL;
  729                /* If the page cannot be used, file it away */
 730                if (page_to_boot_pfn(page) >
 731                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
 732                        list_add(&page->lru, &image->unusable_pages);
 733                        continue;
 734                }
 735                addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 736
  737                /* If it is the destination page we want, use it */
 738                if (addr == destination)
 739                        break;
 740
 741                /* If the page is not a destination page use it */
 742                if (!kimage_is_destination_range(image, addr,
 743                                                  addr + PAGE_SIZE))
 744                        break;
 745
 746                /*
  747                 * I know that the page is someone's destination page.
  748                 * See if there is already a source page for this
  749                 * destination page, and if so, swap the source pages.
 750                 */
 751                old = kimage_dst_used(image, addr);
 752                if (old) {
 753                        /* If so move it */
 754                        unsigned long old_addr;
 755                        struct page *old_page;
 756
 757                        old_addr = *old & PAGE_MASK;
 758                        old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
 759                        copy_highpage(page, old_page);
 760                        *old = addr | (*old & ~PAGE_MASK);
 761
 762                        /* The old page I have found cannot be a
  763                         * destination page, so return it if its
 764                         * gfp_flags honor the ones passed in.
 765                         */
 766                        if (!(gfp_mask & __GFP_HIGHMEM) &&
 767                            PageHighMem(old_page)) {
 768                                kimage_free_pages(old_page);
 769                                continue;
 770                        }
 771                        addr = old_addr;
 772                        page = old_page;
 773                        break;
 774                }
 775                /* Place the page on the destination list, to be used later */
 776                list_add(&page->lru, &image->dest_pages);
 777        }
 778
 779        return page;
 780}
 781
 782static int kimage_load_normal_segment(struct kimage *image,
 783                                         struct kexec_segment *segment)
 784{
 785        unsigned long maddr;
 786        size_t ubytes, mbytes;
 787        int result;
 788        unsigned char __user *buf = NULL;
 789        unsigned char *kbuf = NULL;
 790
 791        result = 0;
 792        if (image->file_mode)
 793                kbuf = segment->kbuf;
 794        else
 795                buf = segment->buf;
 796        ubytes = segment->bufsz;
 797        mbytes = segment->memsz;
 798        maddr = segment->mem;
 799
 800        result = kimage_set_destination(image, maddr);
 801        if (result < 0)
 802                goto out;
 803
 804        while (mbytes) {
 805                struct page *page;
 806                char *ptr;
 807                size_t uchunk, mchunk;
 808
 809                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
 810                if (!page) {
 811                        result  = -ENOMEM;
 812                        goto out;
 813                }
 814                result = kimage_add_page(image, page_to_boot_pfn(page)
 815                                                                << PAGE_SHIFT);
 816                if (result < 0)
 817                        goto out;
 818
 819                ptr = kmap(page);
 820                /* Start with a clear page */
 821                clear_page(ptr);
 822                ptr += maddr & ~PAGE_MASK;
 823                mchunk = min_t(size_t, mbytes,
 824                                PAGE_SIZE - (maddr & ~PAGE_MASK));
 825                uchunk = min(ubytes, mchunk);
 826
 827                /* For file based kexec, source pages are in kernel memory */
 828                if (image->file_mode)
 829                        memcpy(ptr, kbuf, uchunk);
 830                else
 831                        result = copy_from_user(ptr, buf, uchunk);
 832                kunmap(page);
 833                if (result) {
 834                        result = -EFAULT;
 835                        goto out;
 836                }
 837                ubytes -= uchunk;
 838                maddr  += mchunk;
 839                if (image->file_mode)
 840                        kbuf += mchunk;
 841                else
 842                        buf += mchunk;
 843                mbytes -= mchunk;
 844
 845                cond_resched();
 846        }
 847out:
 848        return result;
 849}
 850
 851static int kimage_load_crash_segment(struct kimage *image,
 852                                        struct kexec_segment *segment)
 853{
  854        /* For crash dump kernels we simply copy the data from
  855         * user space to its destination.
 856         * We do things a page at a time for the sake of kmap.
 857         */
 858        unsigned long maddr;
 859        size_t ubytes, mbytes;
 860        int result;
 861        unsigned char __user *buf = NULL;
 862        unsigned char *kbuf = NULL;
 863
 864        result = 0;
 865        if (image->file_mode)
 866                kbuf = segment->kbuf;
 867        else
 868                buf = segment->buf;
 869        ubytes = segment->bufsz;
 870        mbytes = segment->memsz;
 871        maddr = segment->mem;
 872        while (mbytes) {
 873                struct page *page;
 874                char *ptr;
 875                size_t uchunk, mchunk;
 876
 877                page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
 878                if (!page) {
 879                        result  = -ENOMEM;
 880                        goto out;
 881                }
 882                arch_kexec_post_alloc_pages(page_address(page), 1, 0);
 883                ptr = kmap(page);
 884                ptr += maddr & ~PAGE_MASK;
 885                mchunk = min_t(size_t, mbytes,
 886                                PAGE_SIZE - (maddr & ~PAGE_MASK));
 887                uchunk = min(ubytes, mchunk);
 888                if (mchunk > uchunk) {
 889                        /* Zero the trailing part of the page */
 890                        memset(ptr + uchunk, 0, mchunk - uchunk);
 891                }
 892
 893                /* For file based kexec, source pages are in kernel memory */
 894                if (image->file_mode)
 895                        memcpy(ptr, kbuf, uchunk);
 896                else
 897                        result = copy_from_user(ptr, buf, uchunk);
 898                kexec_flush_icache_page(page);
 899                kunmap(page);
 900                arch_kexec_pre_free_pages(page_address(page), 1);
 901                if (result) {
 902                        result = -EFAULT;
 903                        goto out;
 904                }
 905                ubytes -= uchunk;
 906                maddr  += mchunk;
 907                if (image->file_mode)
 908                        kbuf += mchunk;
 909                else
 910                        buf += mchunk;
 911                mbytes -= mchunk;
 912
 913                cond_resched();
 914        }
 915out:
 916        return result;
 917}
 918
 919int kimage_load_segment(struct kimage *image,
 920                                struct kexec_segment *segment)
 921{
 922        int result = -ENOMEM;
 923
 924        switch (image->type) {
 925        case KEXEC_TYPE_DEFAULT:
 926                result = kimage_load_normal_segment(image, segment);
 927                break;
 928        case KEXEC_TYPE_CRASH:
 929                result = kimage_load_crash_segment(image, segment);
 930                break;
 931        }
 932
 933        return result;
 934}
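/*
 * The two loaders above differ mainly in where data lands at load time:
 * the normal path copies each chunk into freshly allocated pages and
 * records them in the entry list so machine_kexec() can move them into
 * place later, while the crash path writes directly into the reserved
 * crashkernel region, which is already the final destination.
 */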
 935
 936struct kimage *kexec_image;
 937struct kimage *kexec_crash_image;
 938int kexec_load_disabled;
 939
 940/*
 941 * No panic_cpu check version of crash_kexec().  This function is called
 942 * only when panic_cpu holds the current CPU number; this is the only CPU
 943 * which processes crash_kexec routines.
 944 */
 945void __noclone __crash_kexec(struct pt_regs *regs)
 946{
 947        /* Take the kexec_mutex here to prevent sys_kexec_load
 948         * running on one cpu from replacing the crash kernel
 949         * we are using after a panic on a different cpu.
 950         *
 951         * If the crash kernel was not located in a fixed area
 952         * of memory the xchg(&kexec_crash_image) would be
 953         * sufficient.  But since I reuse the memory...
 954         */
 955        if (mutex_trylock(&kexec_mutex)) {
 956                if (kexec_crash_image) {
 957                        struct pt_regs fixed_regs;
 958
 959                        crash_setup_regs(&fixed_regs, regs);
 960                        crash_save_vmcoreinfo();
 961                        machine_crash_shutdown(&fixed_regs);
 962                        machine_kexec(kexec_crash_image);
 963                }
 964                mutex_unlock(&kexec_mutex);
 965        }
 966}
 967STACK_FRAME_NON_STANDARD(__crash_kexec);
 968
 969void crash_kexec(struct pt_regs *regs)
 970{
 971        int old_cpu, this_cpu;
 972
 973        /*
 974         * Only one CPU is allowed to execute the crash_kexec() code as with
 975         * panic().  Otherwise parallel calls of panic() and crash_kexec()
 976         * may stop each other.  To exclude them, we use panic_cpu here too.
 977         */
 978        this_cpu = raw_smp_processor_id();
 979        old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
 980        if (old_cpu == PANIC_CPU_INVALID) {
 981                /* This is the 1st CPU which comes here, so go ahead. */
 982                printk_safe_flush_on_panic();
 983                __crash_kexec(regs);
 984
 985                /*
 986                 * Reset panic_cpu to allow another panic()/crash_kexec()
 987                 * call.
 988                 */
 989                atomic_set(&panic_cpu, PANIC_CPU_INVALID);
 990        }
 991}
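/*
 * Note on the panic_cpu handshake above: only the first CPU to move
 * panic_cpu from PANIC_CPU_INVALID to its own id runs __crash_kexec();
 * any CPU that loses the cmpxchg returns immediately, so a panic racing
 * on another CPU cannot start a second crash shutdown.
 */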
 992
 993size_t crash_get_memory_size(void)
 994{
 995        size_t size = 0;
 996
 997        mutex_lock(&kexec_mutex);
 998        if (crashk_res.end != crashk_res.start)
 999                size = resource_size(&crashk_res);
1000        mutex_unlock(&kexec_mutex);
1001        return size;
1002}
1003
1004void __weak crash_free_reserved_phys_range(unsigned long begin,
1005                                           unsigned long end)
1006{
1007        unsigned long addr;
1008
1009        for (addr = begin; addr < end; addr += PAGE_SIZE)
1010                free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
1011}
1012
1013int crash_shrink_memory(unsigned long new_size)
1014{
1015        int ret = 0;
1016        unsigned long start, end;
1017        unsigned long old_size;
1018        struct resource *ram_res;
1019
1020        mutex_lock(&kexec_mutex);
1021
1022        if (kexec_crash_image) {
1023                ret = -ENOENT;
1024                goto unlock;
1025        }
1026        start = crashk_res.start;
1027        end = crashk_res.end;
1028        old_size = (end == 0) ? 0 : end - start + 1;
1029        if (new_size >= old_size) {
1030                ret = (new_size == old_size) ? 0 : -EINVAL;
1031                goto unlock;
1032        }
1033
1034        ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1035        if (!ram_res) {
1036                ret = -ENOMEM;
1037                goto unlock;
1038        }
1039
1040        start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1041        end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1042
1043        crash_free_reserved_phys_range(end, crashk_res.end);
1044
1045        if ((start == end) && (crashk_res.parent != NULL))
1046                release_resource(&crashk_res);
1047
1048        ram_res->start = end;
1049        ram_res->end = crashk_res.end;
1050        ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
1051        ram_res->name = "System RAM";
1052
1053        crashk_res.end = end - 1;
1054
1055        insert_resource(&iomem_resource, ram_res);
1056
1057unlock:
1058        mutex_unlock(&kexec_mutex);
1059        return ret;
1060}
1061
1062void crash_save_cpu(struct pt_regs *regs, int cpu)
1063{
1064        struct elf_prstatus prstatus;
1065        u32 *buf;
1066
1067        if ((cpu < 0) || (cpu >= nr_cpu_ids))
1068                return;
1069
1070        /* Using ELF notes here is opportunistic.
1071         * I need a well defined structure format
1072         * for the data I pass, and I need tags
1073         * on the data to indicate what information I have
1074         * squirrelled away.  ELF notes happen to provide
1075         * all of that, so there is no need to invent something new.
1076         */
1077        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
1078        if (!buf)
1079                return;
1080        memset(&prstatus, 0, sizeof(prstatus));
1081        prstatus.common.pr_pid = current->pid;
1082        elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1083        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1084                              &prstatus, sizeof(prstatus));
1085        final_note(buf);
1086}
1087
1088static int __init crash_notes_memory_init(void)
1089{
1090        /* Allocate memory for saving cpu registers. */
1091        size_t size, align;
1092
1093        /*
 1094         * crash_notes could be allocated across 2 vmalloc pages when percpu
 1095         * is vmalloc based. vmalloc doesn't guarantee that 2 contiguous vmalloc
 1096         * pages are also on 2 contiguous physical pages. In that case the
 1097         * 2nd part of crash_notes in the 2nd page could be lost, since only the
 1098         * starting address and size of crash_notes are exported through sysfs.
 1099         * Here, round the size of crash_notes up to the nearest power of two
 1100         * and pass it to __alloc_percpu() as the align value. This makes sure
 1101         * crash_notes is allocated inside one physical page.
1102         */
1103        size = sizeof(note_buf_t);
1104        align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
1105
1106        /*
 1107         * Break the build if size is bigger than PAGE_SIZE, since crash_notes
 1108         * would then definitely span 2 pages.
1109         */
1110        BUILD_BUG_ON(size > PAGE_SIZE);
1111
1112        crash_notes = __alloc_percpu(size, align);
1113        if (!crash_notes) {
1114                pr_warn("Memory allocation for saving cpu register states failed\n");
1115                return -ENOMEM;
1116        }
1117        return 0;
1118}
1119subsys_initcall(crash_notes_memory_init);
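/*
 * Worked example for the alignment trick above (the byte count is
 * illustrative, the real sizeof(note_buf_t) depends on the architecture):
 * if the note buffer were 428 bytes, roundup_pow_of_two() gives 512, so
 * every per-cpu copy starts on a 512-byte boundary and a sub-512-byte
 * object can never straddle a 4 KiB page.
 */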
1120
1121
1122/*
1123 * Move into place and start executing a preloaded standalone
1124 * executable.  If nothing was preloaded return an error.
1125 */
1126int kernel_kexec(void)
1127{
1128        int error = 0;
1129
1130        if (!mutex_trylock(&kexec_mutex))
1131                return -EBUSY;
1132        if (!kexec_image) {
1133                error = -EINVAL;
1134                goto Unlock;
1135        }
1136
1137#ifdef CONFIG_KEXEC_JUMP
1138        if (kexec_image->preserve_context) {
1139                pm_prepare_console();
1140                error = freeze_processes();
1141                if (error) {
1142                        error = -EBUSY;
1143                        goto Restore_console;
1144                }
1145                suspend_console();
1146                error = dpm_suspend_start(PMSG_FREEZE);
1147                if (error)
1148                        goto Resume_console;
1149                /* At this point, dpm_suspend_start() has been called,
1150                 * but *not* dpm_suspend_end(). We *must* call
1151                 * dpm_suspend_end() now.  Otherwise, drivers for
1152                 * some devices (e.g. interrupt controllers) become
1153                 * desynchronized with the actual state of the
1154                 * hardware at resume time, and evil weirdness ensues.
1155                 */
1156                error = dpm_suspend_end(PMSG_FREEZE);
1157                if (error)
1158                        goto Resume_devices;
1159                error = suspend_disable_secondary_cpus();
1160                if (error)
1161                        goto Enable_cpus;
1162                local_irq_disable();
1163                error = syscore_suspend();
1164                if (error)
1165                        goto Enable_irqs;
1166        } else
1167#endif
1168        {
1169                kexec_in_progress = true;
1170                kernel_restart_prepare("kexec reboot");
1171                migrate_to_reboot_cpu();
1172
1173                /*
1174                 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1175                 * no further code needs to use CPU hotplug (which is true in
1176                 * the reboot case). However, the kexec path depends on using
1177                 * CPU hotplug again; so re-enable it here.
1178                 */
1179                cpu_hotplug_enable();
1180                pr_notice("Starting new kernel\n");
1181                machine_shutdown();
1182        }
1183
1184        kmsg_dump(KMSG_DUMP_SHUTDOWN);
1185        machine_kexec(kexec_image);
1186
1187#ifdef CONFIG_KEXEC_JUMP
1188        if (kexec_image->preserve_context) {
1189                syscore_resume();
1190 Enable_irqs:
1191                local_irq_enable();
1192 Enable_cpus:
1193                suspend_enable_secondary_cpus();
1194                dpm_resume_start(PMSG_RESTORE);
1195 Resume_devices:
1196                dpm_resume_end(PMSG_RESTORE);
1197 Resume_console:
1198                resume_console();
1199                thaw_processes();
1200 Restore_console:
1201                pm_restore_console();
1202        }
1203#endif
1204
1205 Unlock:
1206        mutex_unlock(&kexec_mutex);
1207        return error;
1208}
1209
1210/*
1211 * Protection mechanism for crashkernel reserved memory after
1212 * the kdump kernel is loaded.
1213 *
1214 * Provide an empty default implementation here -- architecture
1215 * code may override this
1216 */
1217void __weak arch_kexec_protect_crashkres(void)
1218{}
1219
1220void __weak arch_kexec_unprotect_crashkres(void)
1221{}
1222