/*
 *  arch/s390/kernel/setup.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/ftrace.h>

#include <asm/ipl.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/compat.h>
#include <asm/kvm_virtio.h>

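/*
 * Default PSW bit patterns used when constructing kernel and user PSWs.
 */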
long psw_kernel_bits    = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
                           PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
long psw_user_bits      = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
                           PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
                           PSW_MASK_PSTATE | PSW_DEFAULT_KEY);

/*
 * User copy operations.
 */
struct uaccess_ops uaccess;
EXPORT_SYMBOL(uaccess);

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

unsigned long elf_hwcap = 0;
char elf_platform[ELF_PLATFORM_SIZE];

struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */

int __initdata memory_end_set;
unsigned long __initdata memory_end;

/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * This is set up by the setup routine at boot time.
 * For S390 we need to find out what we have to set up,
 * using address 0x10400 ...
 */

#include <asm/setup.h>

static struct resource code_resource = {
        .name  = "Kernel code",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
        .name = "Kernel data",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

/*
 * cpu_init() initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
        /*
         * Store processor id in lowcore (used e.g. in timer_interrupt)
         */
        get_cpu_id(&S390_lowcore.cpu_id);

        /*
         * Force FPU initialization:
         */
        clear_thread_flag(TIF_USEDFPU);
        clear_used_math();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
}

/*
 * condev= and conmode= setup parameters.
 */

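/*
 * Example (illustrative): "condev=0x001f" on the kernel command line
 * selects device number 0x001f as the console device. The value is
 * parsed with simple_strtoul(..., 0), so decimal or 0x-prefixed hex
 * is accepted.
 */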
static int __init condev_setup(char *str)
{
        int vdev;

        vdev = simple_strtoul(str, &str, 0);
        if (vdev >= 0 && vdev < 65536) {
                console_devno = vdev;
                console_irq = -1;
        }
        return 1;
}

__setup("condev=", condev_setup);

static void __init set_preferred_console(void)
{
        if (MACHINE_IS_KVM)
                add_preferred_console("hvc", 0, NULL);
        else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
                add_preferred_console("ttyS", 0, NULL);
        else if (CONSOLE_IS_3270)
                add_preferred_console("tty3270", 0, NULL);
}

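/*
 * "conmode=" overrides the automatically determined console mode.
 * The recognized values are the strings compared below, e.g.
 * conmode=sclp, conmode=3215 or conmode=3270.
 */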
static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
        if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
                SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
        if (strncmp(str, "3215", 5) == 0)
                SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
        if (strncmp(str, "3270", 5) == 0)
                SET_CONSOLE_3270;
#endif
        set_preferred_console();
        return 1;
}

__setup("conmode=", conmode_setup);

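/*
 * Pick a default console mode. Under z/VM the output of the CP commands
 * QUERY CONSOLE and QUERY TERM is used to determine the console device
 * and the configured CONMODE; everywhere else the SCLP console is used
 * if one of the SCLP console drivers is configured.
 */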
static void __init conmode_default(void)
{
        char query_buffer[1024];
        char *ptr;

        if (MACHINE_IS_VM) {
                cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
                console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
                ptr = strstr(query_buffer, "SUBCHANNEL =");
                console_irq = simple_strtoul(ptr + 13, NULL, 16);
                cpcmd("QUERY TERM", query_buffer, 1024, NULL);
                ptr = strstr(query_buffer, "CONMODE");
                /*
                 * Set the conmode to 3215 so that the device recognition
                 * will set the cu_type of the console to 3215. If the
                 * conmode is 3270 and we don't set it back then both
                 * 3215 and the 3270 driver will try to access the console
                 * device (3215 as console and 3270 as normal tty).
                 */
                cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
                if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                        return;
                }
                if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
                        SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
                        SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                } else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
                        SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
                        SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                }
        } else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                SET_CONSOLE_SCLP;
#endif
        }
}

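/*
 * For an FCP dump (zfcpdump) restrict the common I/O layer to the dump
 * device and, if known, the console device by appending a matching
 * cio_ignore= parameter to the boot command line.
 */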
#ifdef CONFIG_ZFCPDUMP
static void __init setup_zfcpdump(unsigned int console_devno)
{
        static char str[41];

        if (ipl_info.type != IPL_TYPE_FCP_DUMP)
                return;
        if (console_devno != -1)
                sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
                        ipl_info.data.fcp.dev_id.devno, console_devno);
        else
                sprintf(str, " cio_ignore=all,!0.0.%04x",
                        ipl_info.data.fcp.dev_id.devno);
        strcat(boot_command_line, str);
        console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(unsigned int console_devno) {}
#endif /* CONFIG_ZFCPDUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
        if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_restart(command);
}

void machine_halt(void)
{
        if (!in_interrupt() || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_halt();
}

void machine_power_off(void)
{
        if (!in_interrupt() || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;

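/*
 * "mem=<size>" limits the amount of memory used by the kernel,
 * e.g. mem=512M (illustrative value; memparse() accepts the usual
 * K/M/G suffixes).
 */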
static int __init early_parse_mem(char *p)
{
        memory_end = memparse(p, &p);
        memory_end_set = 1;
        return 0;
}
early_param("mem", early_parse_mem);

#ifdef CONFIG_S390_SWITCH_AMODE
unsigned int switch_amode = 0;
EXPORT_SYMBOL_GPL(switch_amode);

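/*
 * Rebuild the default user (and kernel) PSW bits for the requested
 * address space mode and select the matching user copy functions.
 * Returns non-zero if the mvcos based variant could be used.
 */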
static int set_amode_and_uaccess(unsigned long user_amode,
                                 unsigned long user32_amode)
{
        psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
                        PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
                        PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
#ifdef CONFIG_COMPAT
        psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
                          PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
                          PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
        psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
                          PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
                          PSW32_MASK_PSTATE;
#endif
        psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
                          PSW_MASK_MCHECK | PSW_DEFAULT_KEY;

        if (MACHINE_HAS_MVCOS) {
                memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
                return 1;
        } else {
                memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
                return 0;
        }
}

/*
 * Switch kernel/user addressing modes?
 */
static int __init early_parse_switch_amode(char *p)
{
        switch_amode = 1;
        return 0;
}
early_param("switch_amode", early_parse_switch_amode);

#else /* CONFIG_S390_SWITCH_AMODE */
static inline int set_amode_and_uaccess(unsigned long user_amode,
                                        unsigned long user32_amode)
{
        return 0;
}
#endif /* CONFIG_S390_SWITCH_AMODE */

#ifdef CONFIG_S390_EXEC_PROTECT
unsigned int s390_noexec = 0;
EXPORT_SYMBOL_GPL(s390_noexec);

/*
 * Enable execute protection?
 */
static int __init early_parse_noexec(char *p)
{
        if (!strncmp(p, "off", 3))
                return 0;
        switch_amode = 1;
        s390_noexec = 1;
        return 0;
}
early_param("noexec", early_parse_noexec);
#endif /* CONFIG_S390_EXEC_PROTECT */

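/*
 * Apply the addressing mode selected via "switch_amode"/"noexec":
 * switch the user address space and report whether the mvcos based
 * user copy functions are available.
 */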
static void setup_addressing_mode(void)
{
        if (s390_noexec) {
                if (set_amode_and_uaccess(PSW_ASC_SECONDARY,
                                          PSW32_ASC_SECONDARY))
                        pr_info("Execute protection active, "
                                "mvcos available\n");
                else
                        pr_info("Execute protection active, "
                                "mvcos not available\n");
        } else if (switch_amode) {
                if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY))
                        pr_info("Address spaces switched, "
                                "mvcos available\n");
                else
                        pr_info("Address spaces switched, "
                                "mvcos not available\n");
        }
#ifdef CONFIG_TRACE_IRQFLAGS
        sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
        io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
#endif
}

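/*
 * Allocate and initialize the prefix page (lowcore) for the boot CPU:
 * install the new PSWs for all interruption classes, set up the kernel,
 * async and panic stacks and make the new lowcore the active prefix.
 */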
static void __init
setup_lowcore(void)
{
        struct _lowcore *lc;
        int lc_pages;

        /*
         * Setup lowcore for boot cpu
         */
        lc_pages = sizeof(void *) == 8 ? 2 : 1;
        lc = (struct _lowcore *)
                __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
        memset(lc, 0, lc_pages * PAGE_SIZE);
        lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
        lc->restart_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
        if (switch_amode)
                lc->restart_psw.mask |= PSW_ASC_HOME;
        lc->external_new_psw.mask = psw_kernel_bits;
        lc->external_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
        lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
        lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
        lc->program_new_psw.mask = psw_kernel_bits;
        lc->program_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
        lc->mcck_new_psw.mask =
                psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
        lc->mcck_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
        lc->io_new_psw.mask = psw_kernel_bits;
        lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
        lc->clock_comparator = -1ULL;
        lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
        lc->async_stack = (unsigned long)
                __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
        lc->panic_stack = (unsigned long)
                __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
        lc->current_task = (unsigned long) init_thread_union.thread_info.task;
        lc->thread_info = (unsigned long) &init_thread_union;
        lc->machine_flags = S390_lowcore.machine_flags;
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE) {
                lc->extended_save_area_addr = (__u32)
                        __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
                /* enable extended save area */
                __ctl_set_bit(14, 29);
        }
#else
        lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
        lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
        lc->async_enter_timer = S390_lowcore.async_enter_timer;
        lc->exit_timer = S390_lowcore.exit_timer;
        lc->user_timer = S390_lowcore.user_timer;
        lc->system_timer = S390_lowcore.system_timer;
        lc->steal_timer = S390_lowcore.steal_timer;
        lc->last_update_timer = S390_lowcore.last_update_timer;
        lc->last_update_clock = S390_lowcore.last_update_clock;
        lc->ftrace_func = S390_lowcore.ftrace_func;
        set_prefix((u32)(unsigned long) lc);
        lowcore_ptr[0] = lc;
}

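/*
 * Register the detected memory chunks with the resource tree and mark
 * the ranges occupied by kernel code and data within them.
 */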
static void __init
setup_resources(void)
{
        struct resource *res, *sub_res;
        int i;

        code_resource.start = (unsigned long) &_text;
        code_resource.end = (unsigned long) &_etext - 1;
        data_resource.start = (unsigned long) &_etext;
        data_resource.end = (unsigned long) &_edata - 1;

        for (i = 0; i < MEMORY_CHUNKS; i++) {
                if (!memory_chunk[i].size)
                        continue;
                res = alloc_bootmem_low(sizeof(struct resource));
                res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
                switch (memory_chunk[i].type) {
                case CHUNK_READ_WRITE:
                        res->name = "System RAM";
                        break;
                case CHUNK_READ_ONLY:
                        res->name = "System ROM";
                        res->flags |= IORESOURCE_READONLY;
                        break;
                default:
                        res->name = "reserved";
                }
                res->start = memory_chunk[i].addr;
                res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
                request_resource(&iomem_resource, res);

                if (code_resource.start >= res->start &&
                        code_resource.start <= res->end &&
                        code_resource.end > res->end) {
                        sub_res = alloc_bootmem_low(sizeof(struct resource));
                        memcpy(sub_res, &code_resource,
                                sizeof(struct resource));
                        sub_res->end = res->end;
                        code_resource.start = res->end + 1;
                        request_resource(res, sub_res);
                }

                if (code_resource.start >= res->start &&
                        code_resource.start <= res->end &&
                        code_resource.end <= res->end)
                        request_resource(res, &code_resource);

                if (data_resource.start >= res->start &&
                        data_resource.start <= res->end &&
                        data_resource.end > res->end) {
                        sub_res = alloc_bootmem_low(sizeof(struct resource));
                        memcpy(sub_res, &data_resource,
                                sizeof(struct resource));
                        sub_res->end = res->end;
                        data_resource.start = res->end + 1;
                        request_resource(res, sub_res);
                }

                if (data_resource.start >= res->start &&
                        data_resource.start <= res->end &&
                        data_resource.end <= res->end)
                        request_resource(res, &data_resource);
        }
}

unsigned long real_memory_size;
EXPORT_SYMBOL_GPL(real_memory_size);

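/*
 * Determine the effective end of memory: apply the "mem=" limit (or the
 * HSA size for an FCP dump), align the memory chunks to MAX_ORDER and
 * clip them against the resulting maximum.
 */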
static void __init setup_memory_end(void)
{
        unsigned long memory_size;
        unsigned long max_mem;
        int i;

#ifdef CONFIG_ZFCPDUMP
        if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
                memory_end = ZFCPDUMP_HSA_SIZE;
                memory_end_set = 1;
        }
#endif
        memory_size = 0;
        memory_end &= PAGE_MASK;

        max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS;
        memory_end = min(max_mem, memory_end);

        /*
         * Make sure all chunks are MAX_ORDER aligned so we don't need the
         * extra checks that HOLES_IN_ZONE would require.
         */
        for (i = 0; i < MEMORY_CHUNKS; i++) {
                unsigned long start, end;
                struct mem_chunk *chunk;
                unsigned long align;

                chunk = &memory_chunk[i];
                align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
                start = (chunk->addr + align - 1) & ~(align - 1);
                end = (chunk->addr + chunk->size) & ~(align - 1);
                if (start >= end)
                        memset(chunk, 0, sizeof(*chunk));
                else {
                        chunk->addr = start;
                        chunk->size = end - start;
                }
        }

        for (i = 0; i < MEMORY_CHUNKS; i++) {
                struct mem_chunk *chunk = &memory_chunk[i];

                real_memory_size = max(real_memory_size,
                                       chunk->addr + chunk->size);
                if (chunk->addr >= max_mem) {
                        memset(chunk, 0, sizeof(*chunk));
                        continue;
                }
                if (chunk->addr + chunk->size > max_mem)
                        chunk->size = max_mem - chunk->addr;
                memory_size = max(memory_size, chunk->addr + chunk->size);
        }
        if (!memory_end)
                memory_end = memory_size;
}

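/*
 * Initialize the bootmem allocator, register the usable RAM chunks with
 * it and reserve the memory occupied by the kernel image, the lowcore,
 * the bootmem bitmap and (if present) the initrd.
 */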
static void __init
setup_memory(void)
{
        unsigned long bootmap_size;
        unsigned long start_pfn, end_pfn;
        int i;

        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = PFN_UP(__pa(&_end));
        end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
        /*
         * Move the initrd in case the bitmap of the bootmem allocator
         * would overwrite it.
         */

        if (INITRD_START && INITRD_SIZE) {
                unsigned long bmap_size;
                unsigned long start;

                bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
                bmap_size = PFN_PHYS(bmap_size);

                if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
                        start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;

                        if (start + INITRD_SIZE > memory_end) {
                                pr_err("initrd extends beyond end of "
                                       "memory (0x%08lx > 0x%08lx) "
                                       "disabling initrd\n",
                                       start + INITRD_SIZE, memory_end);
                                INITRD_START = INITRD_SIZE = 0;
                        } else {
                                pr_info("Moving initrd (0x%08lx -> "
                                        "0x%08lx, size: %ld)\n",
                                        INITRD_START, start, INITRD_SIZE);
                                memmove((void *) start, (void *) INITRD_START,
                                        INITRD_SIZE);
                                INITRD_START = start;
                        }
                }
        }
#endif

        /*
         * Initialize the boot-time allocator
         */
        bootmap_size = init_bootmem(start_pfn, end_pfn);

        /*
         * Register RAM areas with the bootmem allocator.
         */

        for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
                unsigned long start_chunk, end_chunk, pfn;

                if (memory_chunk[i].type != CHUNK_READ_WRITE)
                        continue;
                start_chunk = PFN_DOWN(memory_chunk[i].addr);
                end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
                end_chunk = min(end_chunk, end_pfn);
                if (start_chunk >= end_chunk)
                        continue;
                add_active_range(0, start_chunk, end_chunk);
                pfn = max(start_chunk, start_pfn);
                for (; pfn < end_chunk; pfn++)
                        page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
        }

        psw_set_key(PAGE_DEFAULT_KEY);

        free_bootmem_with_active_regions(0, max_pfn);

        /*
         * Reserve memory used for lowcore/command line/kernel image.
         */
        reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
        reserve_bootmem((unsigned long)_stext,
                        PFN_PHYS(start_pfn) - (unsigned long)_stext,
                        BOOTMEM_DEFAULT);
        /*
         * Reserve the bootmem bitmap itself as well. We do this in two
         * steps (first step was init_bootmem()) because this catches
         * the (very unlikely) case of us accidentally initializing the
         * bootmem allocator with an invalid RAM area.
         */
        reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
                        BOOTMEM_DEFAULT);

#ifdef CONFIG_BLK_DEV_INITRD
        if (INITRD_START && INITRD_SIZE) {
                if (INITRD_START + INITRD_SIZE <= memory_end) {
                        reserve_bootmem(INITRD_START, INITRD_SIZE,
                                        BOOTMEM_DEFAULT);
                        initrd_start = INITRD_START;
                        initrd_end = initrd_start + INITRD_SIZE;
                } else {
                        pr_err("initrd extends beyond end of "
                               "memory (0x%08lx > 0x%08lx) "
                               "disabling initrd\n",
                               initrd_start + INITRD_SIZE, memory_end);
                        initrd_start = initrd_end = 0;
                }
        }
#endif
}

/*
 * Setup hardware capabilities.
 */
static void __init setup_hwcaps(void)
{
        static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
        unsigned long long facility_list_extended;
        unsigned int facility_list;
        int i;

        facility_list = stfl();
        /*
         * The store-facility-list bits are numbered as in the Principles
         * of Operation: bit 1UL<<31 is facility bit 0 and bit 1UL<<0 is
         * facility bit 31.
         *   Bit 0: instructions named N3, "backported" to esa-mode
         *   Bit 2: z/Architecture mode is active
         *   Bit 7: the store-facility-list-extended facility is installed
         *   Bit 17: the message-security assist is installed
         *   Bit 19: the long-displacement facility is installed
         *   Bit 21: the extended-immediate facility is installed
         *   Bit 22: extended-translation facility 3 is installed
         *   Bit 30: extended-translation facility 3 enhancement facility
         * These get translated to:
         *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
         *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
         *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
         *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
         */
        for (i = 0; i < 6; i++)
                if (facility_list & (1UL << (31 - stfl_bits[i])))
                        elf_hwcap |= 1UL << i;

        if ((facility_list & (1UL << (31 - 22)))
            && (facility_list & (1UL << (31 - 30))))
                elf_hwcap |= HWCAP_S390_ETF3EH;

        /*
         * Check for additional facilities with store-facility-list-extended.
         * stfle stores doublewords (8 bytes) with bit 1ULL<<63 as bit 0
         * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
         * as stored by stfl, bits 32-xxx contain additional facilities.
         * How many facility words are stored depends on the number of
         * doublewords passed to the instruction. The additional facilities
         * are:
         *   Bit 42: decimal floating point facility is installed
         *   Bit 44: perform floating point operation facility is installed
         * translated to:
         *   HWCAP_S390_DFP bit 6 (42 && 44).
         */
        if ((elf_hwcap & (1UL << 2)) &&
            __stfle(&facility_list_extended, 1) > 0) {
                if ((facility_list_extended & (1ULL << (63 - 42)))
                    && (facility_list_extended & (1ULL << (63 - 44))))
                        elf_hwcap |= HWCAP_S390_DFP;
        }

        /*
         * Huge page support HWCAP_S390_HPAGE is bit 7.
         */
        if (MACHINE_HAS_HPAGE)
                elf_hwcap |= HWCAP_S390_HPAGE;

        /*
         * 64-bit register support for 31-bit processes
         * HWCAP_S390_HIGH_GPRS is bit 9.
         */
        elf_hwcap |= HWCAP_S390_HIGH_GPRS;

        switch (S390_lowcore.cpu_id.machine) {
        case 0x9672:
#if !defined(CONFIG_64BIT)
        default:        /* Use "g5" as default for 31 bit kernels. */
#endif
                strcpy(elf_platform, "g5");
                break;
        case 0x2064:
        case 0x2066:
#if defined(CONFIG_64BIT)
        default:        /* Use "z900" as default for 64 bit kernels. */
#endif
                strcpy(elf_platform, "z900");
                break;
        case 0x2084:
        case 0x2086:
                strcpy(elf_platform, "z990");
                break;
        case 0x2094:
        case 0x2096:
                strcpy(elf_platform, "z9-109");
                break;
        case 0x2097:
        case 0x2098:
                strcpy(elf_platform, "z10");
                break;
        }
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init
setup_arch(char **cmdline_p)
{
        /*
         * print what head.S has found out about the machine
         */
#ifndef CONFIG_64BIT
        if (MACHINE_IS_VM)
                pr_info("Linux is running as a z/VM "
                        "guest operating system in 31-bit mode\n");
        else
                pr_info("Linux is running natively in 31-bit mode\n");
        if (MACHINE_HAS_IEEE)
                pr_info("The hardware system has IEEE compatible "
                        "floating point units\n");
        else
                pr_info("The hardware system has no IEEE compatible "
                        "floating point units\n");
#else /* CONFIG_64BIT */
        if (MACHINE_IS_VM)
                pr_info("Linux is running as a z/VM "
                        "guest operating system in 64-bit mode\n");
        else if (MACHINE_IS_KVM)
                pr_info("Linux is running under KVM in 64-bit mode\n");
        else
                pr_info("Linux is running natively in 64-bit mode\n");
#endif /* CONFIG_64BIT */

        /* Have one command line that is parsed and saved in /proc/cmdline */
        /* boot_command_line has already been set up in early.c */
        *cmdline_p = boot_command_line;

        ROOT_DEV = Root_RAM0;

        init_mm.start_code = PAGE_OFFSET;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        if (MACHINE_HAS_MVCOS)
                memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
        else
                memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

        parse_early_param();

        setup_ipl();
        setup_memory_end();
        setup_addressing_mode();
        setup_memory();
        setup_resources();
        setup_lowcore();

        cpu_init();
        __cpu_logical_map[0] = stap();
        s390_init_cpu_topology();

        /*
         * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
         */
        setup_hwcaps();

        /*
         * Create kernel page tables and switch to virtual addressing.
         */
        paging_init();

        /* Setup default console */
        conmode_default();
        set_preferred_console();

        /* Setup zfcpdump support */
        setup_zfcpdump(console_devno);
}