linux/arch/powerpc/platforms/pseries/setup.c
/*
 *  64-bit pSeries and RS/6000 setup code.
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Adapted from 'alpha' version by Gary Thomas
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  Modified by PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * bootup setup stuff..
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/adb.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/of.h>
#include <linux/of_pci.h>

#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include <asm/pmc.h>
#include <asm/xics.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>

#include "pseries.h"

int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
EXPORT_SYMBOL(CMO_PageSize);

int fwnmi_active;  /* TRUE if an FWNMI handler is present */

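/* Report the machine model string from the device tree root in /proc/cpuinfo. */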
static void pSeries_show_cpuinfo(struct seq_file *m)
{
        struct device_node *root;
        const char *model = "";

        root = of_find_node_by_path("/");
        if (root)
                model = of_get_property(root, "model", NULL);
        seq_printf(m, "machine\t\t: CHRP %s\n", model);
        of_node_put(root);
}

/* Initialize firmware assisted non-maskable interrupts if
 * the firmware supports this feature.
 */
static void __init fwnmi_init(void)
{
        unsigned long system_reset_addr, machine_check_addr;

        int ibm_nmi_register = rtas_token("ibm,nmi-register");
        if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
                return;

        /* If the kernel's not linked at zero we point the firmware at low
         * addresses anyway, and use a trampoline to get to the real code. */
        system_reset_addr  = __pa(system_reset_fwnmi) - PHYSICAL_START;
        machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;

        if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
                                machine_check_addr))
                fwnmi_active = 1;
}

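/*
 * Chained handler for interrupts cascaded from the legacy i8259 PIC:
 * fetch the pending i8259 vector, dispatch it, then EOI the cascade
 * interrupt on the parent controller.
 */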
static void pseries_8259_cascade(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned int cascade_irq = i8259_irq();

        if (cascade_irq != NO_IRQ)
                generic_handle_irq(cascade_irq);

        chip->irq_eoi(&desc->irq_data);
}

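/*
 * Find the "chrp,iic" ISA interrupt controller, map its cascade interrupt,
 * pick up the PCI host bridge's 8259 interrupt-acknowledge address if one
 * is provided, and install the chained cascade handler.
 */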
static void __init pseries_setup_i8259_cascade(void)
{
        struct device_node *np, *old, *found = NULL;
        unsigned int cascade;
        const u32 *addrp;
        unsigned long intack = 0;
        int naddr;

        for_each_node_by_type(np, "interrupt-controller") {
                if (of_device_is_compatible(np, "chrp,iic")) {
                        found = np;
                        break;
                }
        }

        if (found == NULL) {
                printk(KERN_DEBUG "pic: no ISA interrupt controller\n");
                return;
        }

        cascade = irq_of_parse_and_map(found, 0);
        if (cascade == NO_IRQ) {
                printk(KERN_ERR "pic: failed to map cascade interrupt");
                return;
        }
        pr_debug("pic: cascade mapped to irq %d\n", cascade);

        for (old = of_node_get(found); old != NULL ; old = np) {
                np = of_get_parent(old);
                of_node_put(old);
                if (np == NULL)
                        break;
                if (strcmp(np->name, "pci") != 0)
                        continue;
                addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL);
                if (addrp == NULL)
                        continue;
                naddr = of_n_addr_cells(np);
                intack = addrp[naddr-1];
                if (naddr > 1)
                        intack |= ((unsigned long)addrp[naddr-2]) << 32;
        }
        if (intack)
                printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack);
        i8259_init(found, intack);
        of_node_put(found);
        irq_set_chained_handler(cascade, pseries_8259_cascade);
}

static void __init pseries_init_irq(void)
{
        xics_init();
        pseries_setup_i8259_cascade();
}

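/* Ask the hypervisor (H_PERFMON) to enable performance monitor support
 * for this partition.
 */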
static void pseries_lpar_enable_pmcs(void)
{
        unsigned long set, reset;

        set = 1UL << 63;
        reset = 0;
        plpar_hcall_norets(H_PERFMON, set, reset);
}

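/*
 * Keep the pci_dn bookkeeping in sync with device tree reconfiguration
 * (e.g. DLPAR): add device node info under the parent's PHB on attach,
 * unlink it again on detach.
 */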
static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
        struct of_reconfig_data *rd = data;
        struct device_node *parent, *np = rd->dn;
        struct pci_dn *pdn;
        int err = NOTIFY_OK;

        switch (action) {
        case OF_RECONFIG_ATTACH_NODE:
                parent = of_get_parent(np);
                pdn = parent ? PCI_DN(parent) : NULL;
                if (pdn)
                        pci_add_device_node_info(pdn->phb, np);

                of_node_put(parent);
                break;
        case OF_RECONFIG_DETACH_NODE:
                pdn = PCI_DN(np);
                if (pdn)
                        list_del(&pdn->list);
                break;
        default:
                err = NOTIFY_DONE;
                break;
        }
        return err;
}

static struct notifier_block pci_dn_reconfig_nb = {
        .notifier_call = pci_dn_reconfig_notifier,
};

struct kmem_cache *dtl_cache;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Allocate space for the dispatch trace log for all possible cpus
 * and register the buffers with the hypervisor.  This is used for
 * computing time stolen by the hypervisor.
 */
static int alloc_dispatch_logs(void)
{
        int cpu, ret;
        struct paca_struct *pp;
        struct dtl_entry *dtl;

        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return 0;

        if (!dtl_cache)
                return 0;

        for_each_possible_cpu(cpu) {
                pp = &paca[cpu];
                dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
                if (!dtl) {
                        pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
                                cpu);
                        pr_warn("Stolen time statistics will be unreliable\n");
                        break;
                }

                pp->dtl_ridx = 0;
                pp->dispatch_log = dtl;
                pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
                pp->dtl_curr = dtl;
        }

        /* Register the DTL for the current (boot) cpu */
        dtl = get_paca()->dispatch_log;
        get_paca()->dtl_ridx = 0;
        get_paca()->dtl_curr = dtl;
        get_paca()->lppaca_ptr->dtl_idx = 0;

        /* hypervisor reads buffer length from this field */
        dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
        ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
        if (ret)
                pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
                       "with %d\n", smp_processor_id(),
                       hard_smp_processor_id(), ret);
        get_paca()->lppaca_ptr->dtl_enable_mask = 2;

        return 0;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline int alloc_dispatch_logs(void)
{
        return 0;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

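/*
 * Create the kmem cache that backs the per-cpu dispatch trace log buffers
 * (aligned to the buffer size), then allocate and register the logs.
 */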
static int alloc_dispatch_log_kmem_cache(void)
{
        dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
                                                DISPATCH_LOG_BYTES, 0, NULL);
        if (!dtl_cache) {
                pr_warn("Failed to create dispatch trace log buffer cache\n");
                pr_warn("Stolen time statistics will be unreliable\n");
                return 0;
        }

        return alloc_dispatch_logs();
}
machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);

static void pseries_lpar_idle(void)
{
        /*
         * Default handler to go into low thread priority and possibly
         * low power mode by ceding processor to hypervisor
         */

        /* Indicate to hypervisor that we are idle. */
        get_lppaca()->idle = 1;

        /*
         * Yield the processor to the hypervisor.  We return if
         * an external interrupt occurs (which are driven prior
         * to returning here) or if a prod occurs from another
         * processor. When returning here, external interrupts
         * are enabled.
         */
        cede_processor();

        get_lppaca()->idle = 0;
}

/*
 * Enable relocation-on during exceptions. This has partition-wide scope and
 * may take a while to complete; if it takes longer than one second we just
 * give up rather than wasting any more time on it. If that ever turns out to
 * be a problem in practice we can move this into a kernel thread to finish
 * off the process later in boot.
 */
void pseries_enable_reloc_on_exc(void)
{
        long rc;
        unsigned int delay, total_delay = 0;

        while (1) {
                rc = enable_reloc_on_exceptions();
                if (!H_IS_LONG_BUSY(rc)) {
                        if (rc == H_P2) {
                                pr_info("Relocation on exceptions not"
                                        " supported\n");
                        } else if (rc != H_SUCCESS) {
                                pr_warn("Unable to enable relocation"
                                        " on exceptions: %ld\n", rc);
                        }
                        break;
                }

                delay = get_longbusy_msecs(rc);
                total_delay += delay;
                if (total_delay > 1000) {
                        pr_warn("Warning: Giving up waiting to enable "
                                "relocation on exceptions (%u msec)!\n",
                                total_delay);
                        return;
                }

                mdelay(delay);
        }
}
EXPORT_SYMBOL(pseries_enable_reloc_on_exc);

void pseries_disable_reloc_on_exc(void)
{
        long rc;

        while (1) {
                rc = disable_reloc_on_exceptions();
                if (!H_IS_LONG_BUSY(rc))
                        break;
                mdelay(get_longbusy_msecs(rc));
        }
        if (rc != H_SUCCESS)
                pr_warning("Warning: Failed to disable relocation on "
                           "exceptions: %ld\n", rc);
}
EXPORT_SYMBOL(pseries_disable_reloc_on_exc);

#ifdef CONFIG_KEXEC
static void pSeries_machine_kexec(struct kimage *image)
{
        if (firmware_has_feature(FW_FEATURE_SET_MODE))
                pseries_disable_reloc_on_exc();

        default_machine_kexec(image);
}
#endif

#ifdef __LITTLE_ENDIAN__
void pseries_big_endian_exceptions(void)
{
        long rc;

        while (1) {
                rc = enable_big_endian_exceptions();
                if (!H_IS_LONG_BUSY(rc))
                        break;
                mdelay(get_longbusy_msecs(rc));
        }

        /*
         * At this point it is unlikely panic() will get anything
         * out to the user, since this is called very late in kexec
         * but at least this will stop us from continuing on further
         * and creating an even more difficult to debug situation.
         *
         * There is a known problem when kdump'ing: if cpus are offline
         * the above call will fail. Rather than panicking again, keep
         * going and hope the kdump kernel is also little endian, which
         * it usually is.
         */
        if (rc && !kdump_in_progress())
                panic("Could not enable big endian exceptions");
}

void pseries_little_endian_exceptions(void)
{
        long rc;

        while (1) {
                rc = enable_little_endian_exceptions();
                if (!H_IS_LONG_BUSY(rc))
                        break;
                mdelay(get_longbusy_msecs(rc));
        }
        if (rc) {
                ppc_md.progress("H_SET_MODE LE exception fail", 0);
                panic("Could not enable little endian exceptions");
        }
}
#endif

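/*
 * Walk the children of the device tree root looking for PCI host bridge
 * ("pci"/"pciex") nodes and set up a pci_controller for each one found.
 */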
static void __init find_and_init_phbs(void)
{
        struct device_node *node;
        struct pci_controller *phb;
        struct device_node *root = of_find_node_by_path("/");

        for_each_child_of_node(root, node) {
                if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
                                           strcmp(node->type, "pciex") != 0))
                        continue;

                phb = pcibios_alloc_controller(node);
                if (!phb)
                        continue;
                rtas_setup_phb(phb);
                pci_process_bridge_OF_ranges(phb, node, 0);
                isa_bridge_find_early(phb);
                phb->controller_ops = pseries_pci_controller_ops;
        }

        of_node_put(root);

        /*
         * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
         * in chosen.
         */
        of_pci_check_probe_only();
}

static void __init pSeries_setup_arch(void)
{
        set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);

        /* Discover PIC type and setup ppc_md accordingly */
        smp_init_pseries();


        /* openpic global configuration register (64-bit format). */
        /* openpic Interrupt Source Unit pointer (64-bit format). */
        /* python0 facility area (mmio) (64-bit format) REAL address. */

        /* init to some ~sane value until calibrate_delay() runs */
        loops_per_jiffy = 50000000;

        fwnmi_init();

        /* By default, only probe PCI (can be overridden by rtas_pci) */
        pci_add_flags(PCI_PROBE_ONLY);

        /* Find and initialize PCI host bridges */
        init_pci_config_tokens();
        find_and_init_phbs();
        of_reconfig_notifier_register(&pci_dn_reconfig_nb);

        pSeries_nvram_init();

        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                vpa_init(boot_cpuid);
                ppc_md.power_save = pseries_lpar_idle;
                ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
        } else {
                /* No special idle routine */
                ppc_md.enable_pmcs = power4_enable_pmcs;
        }

        ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
}

static int __init pSeries_init_panel(void)
{
        /* Manually leave the kernel version on the panel. */
#ifdef __BIG_ENDIAN__
        ppc_md.progress("Linux ppc64\n", 0);
#else
        ppc_md.progress("Linux ppc64le\n", 0);
#endif
        ppc_md.progress(init_utsname()->version, 0);

        return 0;
}
machine_arch_initcall(pseries, pSeries_init_panel);

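/*
 * On pseries the debug registers are set through the hypervisor: the
 * DABR/DABRX pair via the H_SET_DABR/H_SET_XDABR hcalls, and the DAWR
 * via the plapr_set_watchpoint0() wrapper.
 */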
static int pseries_set_dabr(unsigned long dabr, unsigned long dabrx)
{
        return plpar_hcall_norets(H_SET_DABR, dabr);
}

static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
{
        /* Have to set at least one bit in the DABRX according to PAPR */
        if (dabrx == 0 && dabr == 0)
                dabrx = DABRX_USER;
        /* PAPR says we can only set kernel and user bits */
        dabrx &= DABRX_KERNEL | DABRX_USER;

        return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
}

static int pseries_set_dawr(unsigned long dawr, unsigned long dawrx)
{
        /* PAPR says we can't set HYP */
        dawrx &= ~DAWRX_HYP;

        return plapr_set_watchpoint0(dawr, dawrx);
}

#define CMO_CHARACTERISTICS_TOKEN 44
#define CMO_MAXLENGTH 1026

void pSeries_coalesce_init(void)
{
        struct hvcall_mpp_x_data mpp_x_data;

        if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data))
                powerpc_firmware_features |= FW_FEATURE_XCMO;
        else
                powerpc_firmware_features &= ~FW_FEATURE_XCMO;
}

/**
 * pSeries_cmo_feature_init - FW_FEATURE_CMO is not stored in
 * ibm,hypertas-functions; detect and handle it here.
 * (Stolen from parse_system_parameter_string)
 */
static void pSeries_cmo_feature_init(void)
{
        char *ptr, *key, *value, *end;
        int call_status;
        int page_order = IOMMU_PAGE_SHIFT_4K;

        pr_debug(" -> fw_cmo_feature_init()\n");
        spin_lock(&rtas_data_buf_lock);
        memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
        call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
                                NULL,
                                CMO_CHARACTERISTICS_TOKEN,
                                __pa(rtas_data_buf),
                                RTAS_DATA_BUF_SIZE);

        if (call_status != 0) {
                spin_unlock(&rtas_data_buf_lock);
                pr_debug("CMO not available\n");
                pr_debug(" <- fw_cmo_feature_init()\n");
                return;
        }

        end = rtas_data_buf + CMO_MAXLENGTH - 2;
        ptr = rtas_data_buf + 2;        /* step over strlen value */
        key = value = ptr;

        while (*ptr && (ptr <= end)) {
                /* Separate the key and value by replacing '=' with '\0' and
                 * point the value at the string after the '='
                 */
                if (ptr[0] == '=') {
                        ptr[0] = '\0';
                        value = ptr + 1;
                } else if (ptr[0] == '\0' || ptr[0] == ',') {
                        /* Terminate the string containing the key/value pair */
                        ptr[0] = '\0';

                        if (key == value) {
                                pr_debug("Malformed key/value pair\n");
                                /* Never found a '=', end processing */
                                break;
                        }

                        if (0 == strcmp(key, "CMOPageSize"))
                                page_order = simple_strtol(value, NULL, 10);
                        else if (0 == strcmp(key, "PrPSP"))
                                CMO_PrPSP = simple_strtol(value, NULL, 10);
                        else if (0 == strcmp(key, "SecPSP"))
                                CMO_SecPSP = simple_strtol(value, NULL, 10);
                        value = key = ptr + 1;
                }
                ptr++;
        }

        /* Firmware reports the page size as a power of 2 (the page order);
         * convert it to the page size in bytes before storing it.
         */
        CMO_PageSize = 1 << page_order;
        pr_debug("CMO_PageSize = %lu\n", CMO_PageSize);

        if (CMO_PrPSP != -1 || CMO_SecPSP != -1) {
                pr_info("CMO enabled\n");
                pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
                         CMO_SecPSP);
                powerpc_firmware_features |= FW_FEATURE_CMO;
                pSeries_coalesce_init();
        } else
                pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
                         CMO_SecPSP);
        spin_unlock(&rtas_data_buf_lock);
        pr_debug(" <- fw_cmo_feature_init()\n");
}

/*
 * Early initialization.  Relocation is on but do not reference unbolted pages
 */
static void __init pseries_init(void)
{
        pr_debug(" -> pseries_init()\n");

#ifdef CONFIG_HVC_CONSOLE
        if (firmware_has_feature(FW_FEATURE_LPAR))
                hvc_vio_init_early();
#endif
        if (firmware_has_feature(FW_FEATURE_XDABR))
                ppc_md.set_dabr = pseries_set_xdabr;
        else if (firmware_has_feature(FW_FEATURE_DABR))
                ppc_md.set_dabr = pseries_set_dabr;

        if (firmware_has_feature(FW_FEATURE_SET_MODE))
                ppc_md.set_dawr = pseries_set_dawr;

        pSeries_cmo_feature_init();
        iommu_init_early_pSeries();

        pr_debug(" <- pseries_init()\n");
}

/**
 * pseries_power_off - tell firmware about how to power off the system.
 *
 * This function calls either the power-off rtas token in normal cases
 * or the ibm,power-off-ups token (if present & requested) in case of
 * a power failure. If power-off token is used, power on will only be
 * possible with power button press. If ibm,power-off-ups token is used
 * it will allow auto poweron after power is restored.
 */
static void pseries_power_off(void)
{
        int rc;
        int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups");

        if (rtas_flash_term_hook)
                rtas_flash_term_hook(SYS_POWER_OFF);

        if (rtas_poweron_auto == 0 ||
                rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) {
                rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1);
                printk(KERN_INFO "RTAS power-off returned %d\n", rc);
        } else {
                rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL);
                printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc);
        }
        for (;;);
}

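/*
 * Platform probe: claim the machine if the device tree root reports
 * device_type "chrp", except for the Cell blades whose firmware wrongly
 * claims to be chrp.
 */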
static int __init pSeries_probe(void)
{
        const char *dtype = of_get_property(of_root, "device_type", NULL);

        if (dtype == NULL)
                return 0;
        if (strcmp(dtype, "chrp"))
                return 0;

        /* Cell blades firmware claims to be chrp while it's not. Until this
         * is fixed, we need to avoid those here.
         */
        if (of_machine_is_compatible("IBM,CPBW-1.0") ||
            of_machine_is_compatible("IBM,CBEA"))
                return 0;

        pm_power_off = pseries_power_off;

        pr_debug("Machine is%s LPAR !\n",
                 (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");

        pseries_init();

        return 1;
}

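/* Under an LPAR, PCI devices are discovered from the device tree rather
 * than probed via config space accesses.
 */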
static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
        if (firmware_has_feature(FW_FEATURE_LPAR))
                return PCI_PROBE_DEVTREE;
        return PCI_PROBE_NORMAL;
}

struct pci_controller_ops pseries_pci_controller_ops = {
        .probe_mode             = pSeries_pci_probe_mode,
};

define_machine(pseries) {
        .name                   = "pSeries",
        .probe                  = pSeries_probe,
        .setup_arch             = pSeries_setup_arch,
        .init_IRQ               = pseries_init_irq,
        .show_cpuinfo           = pSeries_show_cpuinfo,
        .log_error              = pSeries_log_error,
        .pcibios_fixup          = pSeries_final_fixup,
        .restart                = rtas_restart,
        .halt                   = rtas_halt,
        .panic                  = rtas_os_term,
        .get_boot_time          = rtas_get_boot_time,
        .get_rtc_time           = rtas_get_rtc_time,
        .set_rtc_time           = rtas_set_rtc_time,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = rtas_progress,
        .system_reset_exception = pSeries_system_reset_exception,
        .machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
        .machine_kexec          = pSeries_machine_kexec,
        .kexec_cpu_down         = pseries_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
        .memory_block_size      = pseries_memory_block_size,
#endif
};