linux/arch/s390/kernel/machine_kexec.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2005, 2011
 *
 * Author(s): Rolf Adelsberger,
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/os_info.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>

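/*
 * relocate_kernel() is implemented in assembler (relocate_kernel.S); it
 * processes the kimage entry list, moves the new kernel into place and
 * branches to its entry point.
 */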
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;

#ifdef CONFIG_CRASH_DUMP

/*
 * Reset the system, copy boot CPU registers to absolute zero,
 * and jump to the kdump image
 */
static void __do_machine_kdump(void *image)
{
        int (*start_kdump)(int);
        unsigned long prefix;

        /* store_status() saved the prefix register to lowcore */
        prefix = (unsigned long) S390_lowcore.prefixreg_save_area;

        /* Now do the reset */
        s390_reset_system();

        /*
         * Copy dump CPU store status info to absolute zero.
         * This needs to be done *after* s390_reset_system() has set the
         * prefix register of this CPU to zero.
         */
        memcpy((void *) __LC_FPREGS_SAVE_AREA,
               (void *)(prefix + __LC_FPREGS_SAVE_AREA), 512);

        __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
        start_kdump = (void *)((struct kimage *) image)->start;
        start_kdump(1);

        /* Die if start_kdump returns */
        disabled_wait();
}

/*
 * Start kdump: create an LGR log entry, store the status of all CPUs and
 * branch to __do_machine_kdump.
 */
static noinline void __machine_kdump(void *image)
{
        struct mcesa *mcesa;
        union ctlreg2 cr2_old, cr2_new;
        int this_cpu, cpu;

        lgr_info_log();
        /* Get status of the other CPUs */
        this_cpu = smp_find_processor_id(stap());
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                if (smp_store_status(cpu))
                        continue;
        }
        /* Store status of the boot CPU */
        mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
        if (MACHINE_HAS_VX)
                save_vx_regs((__vector128 *) mcesa->vector_save_area);
        if (MACHINE_HAS_GS) {
                __ctl_store(cr2_old.val, 2, 2);
                cr2_new = cr2_old;
                cr2_new.gse = 1;
                __ctl_load(cr2_new.val, 2, 2);
                save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
                __ctl_load(cr2_old.val, 2, 2);
        }
        /*
         * To create a good backchain for this CPU in the dump, store_status()
         * is passed the address of a function. The address is saved into
         * the PSW save area of the boot CPU and the function is invoked as
         * a tail call of store_status(). The backchain in the dump will look
         * like this:
         *   restart_int_handler -> __machine_kexec -> __do_machine_kdump
         * The call to store_status() will not return.
         */
        store_status(__do_machine_kdump, image);
}

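/*
 * Call purgatory (image->start) with parameter "0" so that it can verify
 * the kdump checksums. DAT is switched off across the call and switched
 * back on afterwards.
 */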
static unsigned long do_start_kdump(unsigned long addr)
{
        struct kimage *image = (struct kimage *) addr;
        int (*start_kdump)(int) = (void *)image->start;
        int rc;

        __arch_local_irq_stnsm(0xfb); /* disable DAT */
        rc = start_kdump(0);
        __arch_local_irq_stosm(0x04); /* enable DAT */
        return rc;
}

#endif /* CONFIG_CRASH_DUMP */

/*
 * Check if kdump checksums are valid: We call purgatory with parameter "0"
 */
static bool kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
        int rc;

        preempt_disable();
        rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
        preempt_enable();
        return rc == 0;
#else
        return false;
#endif
}

#ifdef CONFIG_CRASH_DUMP

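/*
 * Free the physical pages in [begin, end) that were reserved for the
 * crash kernel and record the remaining crashkernel area (if any) in
 * os_info.
 */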
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
        unsigned long addr, size;

        for (addr = begin; addr < end; addr += PAGE_SIZE)
                free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
        size = begin - crashk_res.start;
        if (size)
                os_info_crashkernel_add(crashk_res.start, size);
        else
                os_info_crashkernel_add(0, 0);
}

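/*
 * Map the crashkernel memory read-only or read-write so that the loaded
 * kdump image cannot be corrupted by accident.
 */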
static void crash_protect_pages(int protect)
{
        unsigned long size;

        if (!crashk_res.end)
                return;
        size = resource_size(&crashk_res);
        if (protect)
                set_memory_ro(crashk_res.start, size >> PAGE_SHIFT);
        else
                set_memory_rw(crashk_res.start, size >> PAGE_SHIFT);
}

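/*
 * kexec core hooks: keep the loaded crash kernel image read-only while it
 * is not being modified.
 */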
void arch_kexec_protect_crashkres(void)
{
        crash_protect_pages(1);
}

void arch_kexec_unprotect_crashkres(void)
{
        crash_protect_pages(0);
}

#endif

/*
 * Give back memory to the hypervisor before a new kdump image is loaded
 */
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
        if (MACHINE_IS_VM)
                diag10_range(PFN_DOWN(crashk_res.start),
                             PFN_DOWN(crashk_res.end - crashk_res.start + 1));
        return 0;
#else
        return -EINVAL;
#endif
}

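/*
 * Prepare a loaded image: crash images only need the hypervisor memory
 * handling above, for normal images the relocation code is copied to the
 * control page.
 */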
int machine_kexec_prepare(struct kimage *image)
{
        void *reboot_code_buffer;

        if (image->type == KEXEC_TYPE_CRASH)
                return machine_kexec_prepare_kdump();

        /* We don't support anything but the default image type for now. */
        if (image->type != KEXEC_TYPE_DEFAULT)
                return -EINVAL;

        /* Get the destination where the assembler code should be copied to. */
        reboot_code_buffer = (void *) page_to_phys(image->control_code_page);

        /* Then copy it */
        memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
        return 0;
}

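/* Nothing architecture specific to clean up. */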
void machine_kexec_cleanup(struct kimage *image)
{
}

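/*
 * Export s390 specific vmcoreinfo data (lowcore pointers, SDMA/EDMA
 * range, KASLR offset) and publish the note's address in the absolute
 * lowcore so that dump tools can find it.
 */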
void arch_crash_save_vmcoreinfo(void)
{
        VMCOREINFO_SYMBOL(lowcore_ptr);
        VMCOREINFO_SYMBOL(high_memory);
        VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
        vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
        vmcoreinfo_append_str("EDMA=%lx\n", __edma);
        vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
        mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
}

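/* Nothing to do, CPUs are stopped later in machine_kexec(). */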
void machine_shutdown(void)
{
}

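/*
 * Store the re-IPL block in os_info so that the kdump kernel can reuse
 * the current IPL settings.
 */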
void machine_crash_shutdown(struct pt_regs *regs)
{
        set_os_info_reipl_block();
}

/*
 * Do normal kexec
 */
static void __do_machine_kexec(void *data)
{
        relocate_kernel_t data_mover;
        struct kimage *image = data;

        s390_reset_system();
        data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

        __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
        /* Call the moving routine */
        (*data_mover)(&image->head, image->start);

        /* Die if kexec returns */
        disabled_wait();
}

/*
 * Reset the system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
        __arch_local_irq_stosm(0x04); /* enable DAT */
        pfault_fini();
        tracing_off();
        debug_locks_off();
#ifdef CONFIG_CRASH_DUMP
        if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH)
                __machine_kdump(data);
#endif
        __do_machine_kexec(data);
}

/*
 * Do either kdump or normal kexec. In case of kdump we first ask
 * purgatory whether the kdump checksums are valid.
 */
void machine_kexec(struct kimage *image)
{
        if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
                return;
        tracer_disable();
        smp_send_stop();
        smp_call_ipl_cpu(__machine_kexec, image);
}