linux/arch/s390/kernel/machine_kexec.c
/*
 * Copyright IBM Corp. 2005, 2011
 *
 * Author(s): Rolf Adelsberger,
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <linux/suspend.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/asm-offsets.h>
#include <asm/os_info.h>

typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;
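/*
 * relocate_kernel[] is the assembly "data mover" provided outside this file:
 * machine_kexec_prepare() copies relocate_kernel_len bytes of it into the
 * image's control page, and __do_machine_kexec() later jumps to that copy
 * with the kimage entry list and the new entry point as arguments (see the
 * relocate_kernel_t prototype above).
 */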

#ifdef CONFIG_CRASH_DUMP

/*
 * Create ELF notes for one CPU
 */
static void add_elf_notes(int cpu)
{
        struct save_area *sa = (void *) 4608 + store_prefix();
        void *ptr;

        memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
        ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
        ptr = fill_cpu_elf_notes(ptr, sa);
        memset(ptr, 0, sizeof(struct elf_note));
}
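
/*
 * Note on the constants above: 4608 (0x1200) is the offset of the register
 * save area within a CPU's prefix (lowcore) page, which corresponds to
 * SAVE_AREA_BASE as used in setup_regs() below; store status places the
 * CPU's registers there.  The final memset() appends an empty ELF note,
 * which terminates the note list for this CPU.
 */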

/*
 * Initialize CPU ELF notes
 */
static void setup_regs(void)
{
        unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
        int cpu, this_cpu;

        this_cpu = smp_find_processor_id(stap());
        add_elf_notes(this_cpu);
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                if (smp_store_status(cpu))
                        continue;
                add_elf_notes(cpu);
        }
        /* Copy dump CPU store status info to absolute zero */
        memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}

/*
 * PM notifier callback for kdump
 */
static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
                               void *ptr)
{
        switch (action) {
        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                if (crashk_res.start)
                        crash_map_reserved_pages();
                break;
        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                if (crashk_res.start)
                        crash_unmap_reserved_pages();
                break;
        default:
                return NOTIFY_DONE;
        }
        return NOTIFY_OK;
}
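
/*
 * The crashkernel region is normally kept out of the kernel mapping (see
 * crash_map_pages() below) so that a preloaded kdump image is protected
 * from stray writes.  The callback above maps it for the duration of
 * suspend or hibernation and unmaps it again afterwards, presumably so the
 * region remains accessible while the hibernation image is written and
 * restored.
 */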

static int __init machine_kdump_pm_init(void)
{
        pm_notifier(machine_kdump_pm_cb, 0);
        return 0;
}
arch_initcall(machine_kdump_pm_init);
#endif

/*
 * Start kdump: we expect that store status has already been done for this CPU.
 */
static void __do_machine_kdump(void *image)
{
#ifdef CONFIG_CRASH_DUMP
        int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;

        setup_regs();
        __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
        start_kdump(1);
#endif
}

/*
 * Check if kdump checksums are valid: We call purgatory with parameter "0"
 */
static int kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
        int (*start_kdump)(int) = (void *)image->start;
        int rc;

        __arch_local_irq_stnsm(0xfb); /* disable DAT */
        rc = start_kdump(0);
        __arch_local_irq_stosm(0x04); /* enable DAT */
        return rc ? 0 : -EINVAL;
#else
        return -EINVAL;
#endif
}
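
/*
 * Purgatory calling convention as used in this file: jumping to the loaded
 * image's entry point with argument 0 (kdump_csum_valid) only verifies the
 * checksums and returns, while argument 1 (__do_machine_kdump) starts the
 * kdump kernel and is not expected to return.
 */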

/*
 * Map or unmap crashkernel memory
 */
static void crash_map_pages(int enable)
{
        unsigned long size = resource_size(&crashk_res);

        BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
               size % KEXEC_CRASH_MEM_ALIGN);
        if (enable)
                vmem_add_mapping(crashk_res.start, size);
        else {
                vmem_remove_mapping(crashk_res.start, size);
                if (size)
                        os_info_crashkernel_add(crashk_res.start, size);
                else
                        os_info_crashkernel_add(0, 0);
        }
}
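
/*
 * The generic kexec code is expected to map the region via
 * crash_map_reserved_pages() while a crash kernel image is loaded into it;
 * the PM notifier above uses the same hooks.  When the mapping is removed
 * again, the current crashkernel range (or 0/0 if there is none) is also
 * recorded in the os_info area, presumably so a later kdump or dump kernel
 * can locate it.
 */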

/*
 * Map crashkernel memory
 */
void crash_map_reserved_pages(void)
{
        crash_map_pages(1);
}

/*
 * Unmap crashkernel memory
 */
void crash_unmap_reserved_pages(void)
{
        crash_map_pages(0);
}

/*
 * Give back memory to hypervisor before new kdump is loaded
 */
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
        if (MACHINE_IS_VM)
                diag10_range(PFN_DOWN(crashk_res.start),
                             PFN_DOWN(crashk_res.end - crashk_res.start + 1));
        return 0;
#else
        return -EINVAL;
#endif
}

int machine_kexec_prepare(struct kimage *image)
{
        void *reboot_code_buffer;

        /* Can't replace kernel image since it is read-only. */
        if (ipl_flags & IPL_NSS_VALID)
                return -EOPNOTSUPP;

        if (image->type == KEXEC_TYPE_CRASH)
                return machine_kexec_prepare_kdump();

        /* We don't support anything but the default image type for now. */
        if (image->type != KEXEC_TYPE_DEFAULT)
                return -EINVAL;

        /* Get the destination where the assembly code should be copied to. */
        reboot_code_buffer = (void *) page_to_phys(image->control_code_page);

        /* Then copy it */
        memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
        return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
}

void arch_crash_save_vmcoreinfo(void)
{
        VMCOREINFO_SYMBOL(lowcore_ptr);
        VMCOREINFO_SYMBOL(high_memory);
        VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
}
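
/*
 * The symbols above end up in the vmcoreinfo ELF note of a crash dump,
 * where user space dump tools (such as crash or makedumpfile) use them to
 * find the per-cpu lowcore pointer array and the high_memory boundary.
 */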

void machine_shutdown(void)
{
}

void machine_crash_shutdown(struct pt_regs *regs)
{
}

/*
 * Do normal kexec
 */
static void __do_machine_kexec(void *data)
{
        relocate_kernel_t data_mover;
        struct kimage *image = data;

        data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

        /* Call the moving routine */
        (*data_mover)(&image->head, image->start);
}

/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
        struct kimage *image = data;

        __arch_local_irq_stosm(0x04); /* enable DAT */
        pfault_fini();
        tracing_off();
        debug_locks_off();
        if (image->type == KEXEC_TYPE_CRASH) {
                lgr_info_log();
                s390_reset_system(__do_machine_kdump, data);
        } else {
                s390_reset_system(__do_machine_kexec, data);
        }
        disabled_wait((unsigned long) __builtin_return_address(0));
}

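/*
 * Overall flow, as implemented in this file:
 *
 *   machine_kexec()
 *     -> smp_send_stop()                      stop all other CPUs
 *     -> smp_call_ipl_cpu(__machine_kexec)    continue on the IPL CPU
 *          -> s390_reset_system(__do_machine_kexec or __do_machine_kdump)
 *               -> relocate_kernel copy in the control page (normal kexec)
 *               -> start_kdump(1), i.e. purgatory            (kdump)
 */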

/*
 * Do either kdump or normal kexec. In the kdump case we first ask purgatory
 * whether the kdump checksums are valid.
 */
void machine_kexec(struct kimage *image)
{
        if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
                return;
        tracer_disable();
        smp_send_stop();
        smp_call_ipl_cpu(__machine_kexec, image);
}