linux/arch/tile/kernel/machine_kexec.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * based on machine_kexec.c from other architectures in linux-2.6.18
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/highmem.h>
#include <linux/mmu_context.h>
#include <linux/io.h>
#include <linux/timex.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>


/*
 * This stuff is not in elf.h and is not in any other kernel include.
 * This stuff is needed below in the little boot notes parser to
 * extract the command line so we can pass it to the hypervisor.
 */
struct Elf32_Bhdr {
        Elf32_Word b_signature;
        Elf32_Word b_size;
        Elf32_Half b_checksum;
        Elf32_Half b_records;
};
#define ELF_BOOT_MAGIC          0x0E1FB007
#define EBN_COMMAND_LINE        0x00000004
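/* Round a note descriptor size up to its 4-byte alignment boundary. */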
#define roundupsz(X) (((X) + 3) & ~3)

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */


void machine_shutdown(void)
{
        /*
         * Normally we would stop all the other processors here, but
         * the check in machine_kexec_prepare below ensures we'll only
         * get this far if we've been booted with "nosmp" on the
         * command line or without CONFIG_SMP so there's nothing to do
         * here (for now).
         */
}

void machine_crash_shutdown(struct pt_regs *regs)
{
        /*
         * Cannot happen.  This type of kexec is disabled on this
         * architecture (and enforced in machine_kexec_prepare below).
         */
}


int machine_kexec_prepare(struct kimage *image)
{
        if (num_online_cpus() > 1) {
                pr_warn("%s: detected attempt to kexec with num_online_cpus() > 1\n",
                        __func__);
                return -ENOSYS;
        }
        if (image->type != KEXEC_TYPE_DEFAULT) {
                pr_warn("%s: detected attempt to kexec with unsupported type: %d\n",
                        __func__, image->type);
                return -ENOSYS;
        }
        return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
        /*
         * We did nothing in machine_kexec_prepare,
         * so we have nothing to do here.
         */
}

/*
 * If we can find elf boot notes on this page, return the command
 * line.  Otherwise, silently return null.  Somewhat kludgy, but no
 * good way to do this without significantly rearchitecting the
 * architecture-independent kexec code.
 */

static unsigned char *kexec_bn2cl(void *pg)
{
        struct Elf32_Bhdr *bhdrp;
        Elf32_Nhdr *nhdrp;
        unsigned char *desc;
        unsigned char *command_line;
        __sum16 csum;

        bhdrp = (struct Elf32_Bhdr *) pg;

        /*
         * This routine is invoked for every source page, so make
         * sure to quietly ignore every impossible page.
         */
        if (bhdrp->b_signature != ELF_BOOT_MAGIC ||
            bhdrp->b_size > PAGE_SIZE)
                return NULL;

        /*
         * If we get a checksum mismatch, warn with the checksum
         * so we can diagnose better.
         */
        csum = ip_compute_csum(pg, bhdrp->b_size);
        if (csum != 0) {
                pr_warn("%s: bad checksum %#x (size %d)\n",
                        __func__, csum, bhdrp->b_size);
                return NULL;
        }

        nhdrp = (Elf32_Nhdr *) (bhdrp + 1);

        while (nhdrp->n_type != EBN_COMMAND_LINE) {

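                /* Skip past this note's (padded) descriptor to the next header. */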
                desc = (unsigned char *) (nhdrp + 1);
                desc += roundupsz(nhdrp->n_descsz);

                nhdrp = (Elf32_Nhdr *) desc;

                /* still in bounds? */
                if ((unsigned char *) (nhdrp + 1) >
                    ((unsigned char *) pg) + bhdrp->b_size) {

                        pr_info("%s: out of bounds\n", __func__);
                        return NULL;
                }
        }

        command_line = (unsigned char *) (nhdrp + 1);
        desc = command_line;

        while (*desc != '\0') {
                desc++;
                if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
                        pr_info("%s: ran off end of page\n", __func__);
                        return NULL;
                }
        }

        return command_line;
}

static void kexec_find_and_set_command_line(struct kimage *image)
{
        kimage_entry_t *ptr, entry;

        unsigned char *command_line = NULL;
        unsigned char *r;
        HV_Errno hverr;

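        /*
         * Walk the kimage entry list: indirection entries point at the
         * next page of entries, and source entries name the pages that
         * hold the new kernel's data.  Scan each source page for boot
         * notes carrying a command line.
         */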
        for (ptr = &image->head;
             (entry = *ptr) && !(entry & IND_DONE);
             ptr = (entry & IND_INDIRECTION) ?
                     phys_to_virt((entry & PAGE_MASK)) : ptr + 1) {

                if ((entry & IND_SOURCE)) {
                        void *va =
                                kmap_atomic_pfn(entry >> PAGE_SHIFT);
                        r = kexec_bn2cl(va);
                        if (r) {
                                command_line = r;
                                break;
                        }
                        kunmap_atomic(va);
                }
        }

        if (command_line != NULL) {
                pr_info("setting new command line to \"%s\"\n", command_line);

                hverr = hv_set_command_line(
                        (HV_VirtAddr) command_line, strlen(command_line));
                kunmap_atomic(command_line);
        } else {
                pr_info("%s: no command line found; making empty\n", __func__);
                hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
        }
        if (hverr)
                pr_warn("%s: hv_set_command_line returned error: %d\n",
                        __func__, hverr);
}

/*
 * The kexec code range-checks all its PAs, so to avoid having it run
 * amok and allocate memory and then sequester it from every other
 * controller, we force it to come from controller zero.  We also
 * disable the oom-killer since if we do end up running out of memory,
 * that almost certainly won't help.
 */
struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
{
        gfp_mask |= __GFP_THISNODE | __GFP_NORETRY;
        return alloc_pages_node(0, gfp_mask, order);
}

/*
 * Address range over which the pa=va mapping is set up in
 * setup_quasi_va_is_pa().  On tilepro we use PAGE_OFFSET, the largest
 * possible value, while on tilegx we limit it to a single middle-level
 * page table, which we assume has been allocated and is undoubtedly
 * large enough.
 */
#ifndef __tilegx__
#define QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
#else
#define QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
#endif

static void setup_quasi_va_is_pa(void)
{
        HV_PTE pte;
        unsigned long i;

        /*
         * Flush our TLB to prevent conflicts between the previous contents
         * and the new stuff we're about to add.
         */
        local_flush_tlb_all();

        /*
         * Set up VA == PA mappings, at least up to
         * QUASI_VA_IS_PA_ADDR_RANGE.  Note that we assume here that the
         * level-1 page table granularity is HPAGE_SIZE.
         */
        pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
        pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
        for (i = 0; i < (QUASI_VA_IS_PA_ADDR_RANGE >> HPAGE_SHIFT); i++) {
                unsigned long vaddr = i << HPAGE_SHIFT;
                pgd_t *pgd = pgd_offset(current->mm, vaddr);
                pud_t *pud = pud_offset(pgd, vaddr);
                pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr);
                unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);

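                /* Install an identity (PA == VA) huge-page mapping for this range. */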
                if (pfn_valid(pfn))
                        __set_pte(ptep, pfn_pte(pfn, pte));
        }
}


void machine_kexec(struct kimage *image)
{
        void *reboot_code_buffer;
        pte_t *ptep;
        void (*rnk)(unsigned long, void *, unsigned long)
                __noreturn;

        /* Mask all interrupts before starting to reboot. */
        interrupt_mask_set_mask(~0ULL);

        kexec_find_and_set_command_line(image);

        /*
         * Adjust the home caching of the control page to be cached on
         * this cpu, and copy the assembly helper into the control
         * code page, which we map in the vmalloc area.
         */
        homecache_change_page_home(image->control_code_page, 0,
                                   smp_processor_id());
        reboot_code_buffer = page_address(image->control_code_page);
        BUG_ON(reboot_code_buffer == NULL);
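        /* Make the control page executable before copying code into it. */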
        ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
        __set_pte(ptep, pte_mkexec(*ptep));
        memcpy(reboot_code_buffer, relocate_new_kernel,
               relocate_new_kernel_size);
        __flush_icache_range(
                (unsigned long) reboot_code_buffer,
                (unsigned long) reboot_code_buffer + relocate_new_kernel_size);

        setup_quasi_va_is_pa();

        /*
         * Now call the relocation helper: it copies the new kernel into
         * place as described by the kimage entry list and then jumps to
         * its entry point.  It does not return.
         */
        rnk = reboot_code_buffer;
        (*rnk)(image->head, reboot_code_buffer, image->start);
}