linux/arch/powerpc/kernel/smp.c
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
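/*
 * Generic secondary kick: release a spinning secondary by setting its
 * paca cpu_start flag, or (with CPU hotplug) wake a soft-unplugged cpu
 * by flipping its state to CPU_UP_PREPARE and sending a reschedule IPI.
 */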
int __devinit smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero. After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        if (!paca[nr].cpu_start) {
                paca[nr].cpu_start = 1;
                smp_mb();
                return 0;
        }

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * Ok it's not there, so it might be soft-unplugged, let's
         * try to bring it back.
         */
        per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
        smp_wmb();
        smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

        return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
        scheduler_ipi();
        return IRQ_HANDLED;
}

static irqreturn_t call_function_single_action(int irq, void *data)
{
        generic_smp_call_function_single_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
        if (crash_ipi_function_ptr) {
                crash_ipi_function_ptr(get_irq_regs());
                return IRQ_HANDLED;
        }

#ifdef CONFIG_DEBUGGER
        debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

        return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
        [PPC_MSG_CALL_FUNCTION] =  call_function_action,
        [PPC_MSG_RESCHEDULE] = reschedule_action,
        [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
        [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
        [PPC_MSG_CALL_FUNCTION] =  "ipi call function",
        [PPC_MSG_RESCHEDULE] = "ipi reschedule",
        [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
        [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* Optional function to request an IPI, for controllers with >= 4 IPIs */
int smp_request_message_ipi(int virq, int msg)
{
        int err;

        if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
                return -EINVAL;
        }
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
        if (msg == PPC_MSG_DEBUGGER_BREAK) {
                return 1;
        }
#endif
        err = request_irq(virq, smp_ipi_action[msg],
                          IRQF_PERCPU | IRQF_NO_THREAD,
                          smp_ipi_name[msg], 0);
        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
                virq, smp_ipi_name[msg], err);

        return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
        int messages;                   /* current messages */
        unsigned long data;             /* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

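/* Stash the per-cpu value that will later be handed to cause_ipi(). */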
void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);

        info->data = data;
}

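/*
 * Post @msg in the target cpu's message word, then invoke the platform
 * cause_ipi() hook so the target demultiplexes it in smp_ipi_demux().
 */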
void smp_muxed_ipi_message_pass(int cpu, int msg)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
        char *message = (char *)&info->messages;

        /*
         * Order previous accesses before accesses in the IPI handler.
         */
        smp_mb();
        message[msg] = 1;
        /*
         * cause_ipi functions are required to include a full barrier
         * before doing whatever causes the IPI.
         */
        smp_ops->cause_ipi(cpu, info->data);
}

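/*
 * Demultiplex a muxed IPI: atomically grab and clear this cpu's pending
 * message bits and dispatch each set message, looping until no further
 * messages have been posted.
 */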
irqreturn_t smp_ipi_demux(void)
{
        struct cpu_messages *info = &__get_cpu_var(ipi_message);
        unsigned int all;

        mb();   /* order any irq clear */

        do {
                all = xchg(&info->messages, 0);

#ifdef __BIG_ENDIAN
                if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
                        generic_smp_call_function_interrupt();
                if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
                        scheduler_ipi();
                if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
                        generic_smp_call_function_single_interrupt();
                if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
                        debug_ipi_action(0, NULL);
#else
#error Unsupported ENDIAN
#endif
        } while (info->messages);

        return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

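/*
 * Deliver a single IPI message to @cpu, via the platform message_pass
 * hook if one exists, otherwise through the muxed IPI path.
 */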
static inline void do_message_pass(int cpu, int msg)
{
        if (smp_ops->message_pass)
                smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
        else
                smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
        if (likely(smp_ops))
                do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
        do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
        int cpu;
        int me = raw_smp_processor_id();

        if (unlikely(!smp_ops))
                return;

        for_each_online_cpu(cpu)
                if (cpu != me)
                        do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        crash_ipi_function_ptr = crash_ipi_callback;
        if (crash_ipi_callback) {
                mb();
                smp_send_debugger_break();
        }
}
#endif

static void stop_this_cpu(void *dummy)
{
        /* Remove this CPU */
        set_cpu_online(smp_processor_id(), false);

        local_irq_disable();
        while (1)
                ;
}

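/* Stop all other cpus: each marks itself offline and spins with irqs off. */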
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
        per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
        per_cpu(next_tlbcam_idx, id)
                = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up yet, but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        for_each_possible_cpu(cpu) {
                zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
        }

        cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
        cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

        if (smp_ops) {
                if (smp_ops->probe)
                        max_cpus = smp_ops->probe();
                else
                        max_cpus = NR_CPUS;
        } else {
                max_cpus = 1;
        }
}

void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
#endif
        current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
        vdso_data->processorCount--;
#endif
        migrate_irqs();
        return 0;
}

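/*
 * Wait up to ~10 seconds for the dying cpu to report CPU_DEAD, warning
 * if it never does.
 */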
void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

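/*
 * Default death loop, run on the cpu going offline: mark itself CPU_DEAD
 * and spin until a later kick sets its state back to CPU_UP_PREPARE.
 */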
void generic_mach_cpu_die(void)
{
        unsigned int cpu;

        local_irq_disable();
        idle_task_exit();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
}

void generic_set_cpu_dead(unsigned int cpu)
{
        per_cpu(cpu_state, cpu) = CPU_DEAD;
}

int generic_check_cpu_restart(unsigned int cpu)
{
        return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
#endif

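/*
 * Point the bringup bookkeeping (paca, secondary_ti, current_set) at the
 * idle task the new cpu will run.
 */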
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
        struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
        paca[cpu].__current = idle;
        paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
        ti->cpu = cpu;
        secondary_ti = current_set[cpu] = ti;
}

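/*
 * Bring one secondary cpu online: hand it its idle thread, kick it via
 * smp_ops, then wait for it to call in and mark itself online.
 */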
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc, c;

        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;

        cpu_idle_thread_init(cpu, tidle);

        /* Make sure callin map entry is 0 (can be leftover from a
         * CPU hotplug).
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        rc = smp_ops->kick_cpu(cpu);
        if (rc) {
                pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
                return rc;
        }

        /*
         * wait to see if the cpu made a callin (is actually up).
         * use this value that I found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
                        msleep(1);
#endif

        if (!cpu_callin_map[cpu]) {
                printk(KERN_ERR "Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        DBG("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
        struct device_node *np;
        const int *reg;
        int id = -1;

        np = of_get_cpu_node(cpu, NULL);
        if (!np)
                goto out;

        reg = of_get_property(np, "reg", NULL);
        if (!reg)
                goto out;

        id = *reg;
out:
        of_node_put(np);
        return id;
}

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
        return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
        return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
        struct device_node *np;
        struct device_node *cache;

        if (!cpu_present(cpu))
                return NULL;

        np = of_get_cpu_node(cpu, NULL);
        if (np == NULL)
                return NULL;

        cache = of_find_next_cache_node(np);

        of_node_put(np);

        return cache;
}

/* Activate a secondary processor. */
void __devinit start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();
        struct device_node *l2_cache;
        int i, base;

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
        cpu_callin_map[cpu] = 1;

        if (smp_ops->setup_cpu)
                smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        secondary_cpu_time_init();

#ifdef CONFIG_PPC64
        if (system_state == SYSTEM_RUNNING)
                vdso_data->processorCount++;

        vdso_getcpu_init();
#endif
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core; i++) {
                if (cpu_is_offline(base + i))
                        continue;
                cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

                /* cpu_core_map should be a superset of
                 * cpu_sibling_map even if we don't have cache
                 * information, so update the former here, too.
                 */
                cpumask_set_cpu(cpu, cpu_core_mask(base + i));
                cpumask_set_cpu(base + i, cpu_core_mask(cpu));
        }
        l2_cache = cpu_to_l2cache(cpu);
        for_each_online_cpu(i) {
                struct device_node *np = cpu_to_l2cache(i);
                if (!np)
                        continue;
                if (np == l2_cache) {
                        cpumask_set_cpu(cpu, cpu_core_mask(i));
                        cpumask_set_cpu(i, cpu_core_mask(cpu));
                }
                of_node_put(np);
        }
        of_node_put(l2_cache);

        local_irq_enable();

        cpu_idle();

        BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

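/*
 * Final bringup fixups: run setup_cpu() on the boot cpu (pinning
 * ourselves there briefly), invoke the platform bringup_done hook and
 * dump the NUMA topology.
 */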
void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_var_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the
         * meantime, so we pin ourselves to CPU 0 for a short while.
         */
        alloc_cpumask_var(&old_mask, GFP_NOWAIT);
        cpumask_copy(old_mask, tsk_cpus_allowed(current));
        set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

        if (smp_ops && smp_ops->setup_cpu)
                smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed_ptr(current, old_mask);

        free_cpumask_var(old_mask);

        if (smp_ops && smp_ops->bringup_done)
                smp_ops->bringup_done();

        dump_numa_cpu_topology();
}

int arch_sd_sibling_asym_packing(void)
{
        if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
                printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
                return SD_ASYM_PACKING;
        }
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
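/*
 * Take the current cpu out of service: call the platform cpu_disable
 * hook, then remove the cpu from the sibling, core and shared-L2 maps.
 */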
int __cpu_disable(void)
{
        struct device_node *l2_cache;
        int cpu = smp_processor_id();
        int base, i;
        int err;

        if (!smp_ops->cpu_disable)
                return -ENOSYS;

        err = smp_ops->cpu_disable();
        if (err)
                return err;

        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core; i++) {
                cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
                cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
        }

        l2_cache = cpu_to_l2cache(cpu);
        for_each_present_cpu(i) {
                struct device_node *np = cpu_to_l2cache(i);
                if (!np)
                        continue;
                if (np == l2_cache) {
                        cpumask_clear_cpu(cpu, cpu_core_mask(i));
                        cpumask_clear_cpu(i, cpu_core_mask(cpu));
                }
                of_node_put(np);
        }
        of_node_put(l2_cache);

        return 0;
}

void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}

static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
        mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
        mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
}

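/* Run on the dying cpu itself; defers to the machine-specific cpu_die hook. */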
void cpu_die(void)
{
        if (ppc_md.cpu_die)
                ppc_md.cpu_die();

        /* If we return, we re-enter start_secondary */
        start_secondary_resume();
}

#endif