linux/arch/arm/kernel/smp.c
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int __cpuinitdata pen_release = -1;

enum ipi_msg_type {
        IPI_WAKEUP,
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
        if (ops)
                smp_ops = *ops;
};
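
/*
 * Illustrative sketch (all myplat_* names are hypothetical): a platform
 * typically describes its SMP hooks in a struct smp_operations and hands
 * it to the core either by calling smp_set_ops() early or via the .smp
 * field of its machine descriptor:
 *
 *      static struct smp_operations myplat_smp_ops __initdata = {
 *              .smp_init_cpus          = myplat_smp_init_cpus,
 *              .smp_prepare_cpus       = myplat_smp_prepare_cpus,
 *              .smp_secondary_init     = myplat_secondary_init,
 *              .smp_boot_secondary     = myplat_boot_secondary,
 *      #ifdef CONFIG_HOTPLUG_CPU
 *              .cpu_die                = myplat_cpu_die,
 *      #endif
 *      };
 *
 *      DT_MACHINE_START(MYPLAT_DT, "Hypothetical board")
 *              .smp            = smp_ops(myplat_smp_ops),
 *              ...
 *      MACHINE_END
 */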

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(idmap_pgd);
        secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                                 msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
        if (smp_ops.smp_init_cpus)
                smp_ops.smp_init_cpus();
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        if (smp_ops.smp_boot_secondary)
                return smp_ops.smp_boot_secondary(cpu, idle);
        return -ENOSYS;
}
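
/*
 * Illustrative sketch of a .smp_boot_secondary hook for platforms whose
 * secondaries park in the boot "holding pen" above (all myplat_* names
 * are hypothetical): the boot CPU writes the target CPU into pen_release,
 * pushes it out to the point of coherency, kicks the core awake and then
 * waits for the secondary to write -1 back:
 *
 *      static int __cpuinit myplat_boot_secondary(unsigned int cpu,
 *                                                 struct task_struct *idle)
 *      {
 *              unsigned long timeout;
 *
 *              pen_release = cpu_logical_map(cpu);
 *              smp_wmb();
 *              __cpuc_flush_dcache_area((void *)&pen_release,
 *                                       sizeof(pen_release));
 *              outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
 *
 *              arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 *
 *              timeout = jiffies + (1 * HZ);
 *              while (time_before(jiffies, timeout)) {
 *                      smp_rmb();
 *                      if (pen_release == -1)
 *                              return 0;
 *                      udelay(10);
 *              }
 *
 *              return -ENOSYS;
 *      }
 */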

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

static int platform_cpu_kill(unsigned int cpu)
{
        if (smp_ops.cpu_kill)
                return smp_ops.cpu_kill(cpu);
        return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
        if (smp_ops.cpu_disable)
                return smp_ops.cpu_disable(cpu);

        /*
         * By default, allow disabling all CPUs except the first one,
         * since this is special on a lot of platforms, e.g. because
         * of clock tick interrupts.
         */
        return cpu == 0 ? -EPERM : 0;
}
/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpuinit __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        percpu_timer_stop();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         *
         * Caches are flushed to the Level of Unification Inner Shareable
         * to write-back dirty lines to unified caches shared by all CPUs.
         */
        flush_cache_louis();
        local_flush_tlb_all();

        clear_tasks_mm_cpumask(cpu);

        return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it times out.
 */
void __cpuinit __cpu_die(unsigned int cpu)
{
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

        /*
         * platform_cpu_kill() is generally expected to do the powering off
         * and/or cutting of clocks to the dying CPU.  Optionally, this may
         * be done by the CPU which is dying in preference to supporting
         * this call, but that means there is _no_ synchronisation between
         * the requesting CPU and the dying CPU actually losing power.
         */
        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();

        /*
         * Flush the data out of the L1 cache for this CPU.  This must be
         * before the completion to ensure that data is safely written out
         * before platform_cpu_kill() gets called - which may disable
         * *this* CPU and power down its cache.
         */
        flush_cache_louis();

        /*
         * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
         * this returns, power and/or clocks can be removed at any point
         * from this CPU and its cache by platform_cpu_kill().
         */
        complete(&cpu_died);

        /*
         * Ensure that the cache lines associated with that completion are
         * written out.  This covers the case where _this_ CPU is doing the
         * powering down, to ensure that the completion is visible to the
         * CPU waiting for this one.
         */
        flush_cache_louis();

        /*
         * The actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.  This may remove power, or it may simply spin.
         *
         * Platforms are generally expected *NOT* to return from this call,
         * although there are some which do because they have no way to
         * power down the CPU.  These platforms are the _only_ reason we
         * have a return path which uses the fragment of assembly below.
         *
         * The return path should not be used for platforms which can
         * power off the CPU.
         */
        if (smp_ops.cpu_die)
                smp_ops.cpu_die(cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
        __asm__("mov    sp, %0\n"
        "       mov     fp, #0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
        cpu_info->cpuid = read_cpuid_id();

        store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;

        /*
         * The identity mapping is uncached (strongly ordered), so
         * switch away from it before attempting any exclusive accesses.
         */
        cpu_switch_mm(mm->pgd, mm);
        local_flush_bp_all();
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        cpu = smp_processor_id();
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        cpu_init();

        printk("CPU%u: Booted secondary processor\n", cpu);

        preempt_disable();
        trace_hardirqs_off();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        if (smp_ops.smp_secondary_init)
                smp_ops.smp_secondary_init(cpu);

        notify_cpu_starting(cpu);

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue - which happens after __cpu_up returns.
         */
        set_cpu_online(cpu, true);
        complete(&cpu_running);

        /*
         * Setup the percpu timer for this CPU.
         */
        percpu_timer_setup();

        local_irq_enable();
        local_fiq_enable();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

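        /*
         * loops_per_jiffy is roughly BogoMIPS * 500000 / HZ, so the sum
         * over all online CPUs converts back to BogoMIPS as:
         *   integer part = bogosum / (500000 / HZ)
         *   two decimals = (bogosum / (5000 / HZ)) % 100
         */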
        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);

        hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int ncores = num_possible_cpus();

        init_cpu_topology();

        smp_store_cpu_info(smp_processor_id());

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;
        if (ncores > 1 && max_cpus) {
                /*
                 * Enable the local timer or broadcast device for the
                 * boot CPU, but only if we have more than one CPU.
                 */
                percpu_timer_setup();

                /*
                 * Initialise the present map, which describes the set of CPUs
                 * actually populated at the present time. A platform should
                 * re-initialize the map in the platform's smp_prepare_cpus()
                 * if present != possible (e.g. physical hotplug).
                 */
                init_cpu_present(cpu_possible_mask);

                /*
                 * Initialise the SCU if there is more than one CPU
                 * and let them know where to start.
                 */
                if (smp_ops.smp_prepare_cpus)
                        smp_ops.smp_prepare_cpus(max_cpus);
        }
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        if (!smp_cross_call)
                smp_cross_call = fn;
}
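
/*
 * The cross-call backend is normally registered by the interrupt
 * controller driver once it knows how to raise a software generated
 * interrupt; for example, a GIC-style driver does roughly:
 *
 *      set_smp_cross_call(gic_raise_softirq);
 */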

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)  [x] = s
        S(IPI_WAKEUP, "CPU wakeup interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));

                seq_printf(p, " %s\n", ipi_types[i]);
        }
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

        return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
        struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
        evt->name       = "dummy_timer";
        evt->features   = CLOCK_EVT_FEAT_ONESHOT |
                          CLOCK_EVT_FEAT_PERIODIC |
                          CLOCK_EVT_FEAT_DUMMY;
        evt->rating     = 100;
        evt->mult       = 1;
        evt->set_mode   = broadcast_timer_set_mode;

        clockevents_register_device(evt);
}

static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
        if (!is_smp() || !setup_max_cpus)
                return -ENXIO;

        if (lt_ops)
                return -EBUSY;

        lt_ops = ops;
        return 0;
}
#endif
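
/*
 * Illustrative sketch of a per-CPU timer driver registering its ops
 * (all mytimer_* names and the "rate" value are hypothetical); the
 * registration has to happen before the secondaries reach
 * percpu_timer_setup() below:
 *
 *      static int __cpuinit mytimer_setup(struct clock_event_device *evt)
 *      {
 *              evt->name       = "mytimer";
 *              evt->features   = CLOCK_EVT_FEAT_PERIODIC |
 *                                CLOCK_EVT_FEAT_ONESHOT;
 *              evt->rating     = 350;
 *              evt->set_mode   = mytimer_set_mode;
 *              evt->set_next_event = mytimer_set_next_event;
 *              clockevents_config_and_register(evt, rate, 0xf, 0x7fffffff);
 *              return 0;
 *      }
 *
 *      static void mytimer_stop(struct clock_event_device *evt)
 *      {
 *              evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
 *      }
 *
 *      static struct local_timer_ops mytimer_lt_ops __cpuinitdata = {
 *              .setup  = mytimer_setup,
 *              .stop   = mytimer_stop,
 *      };
 *
 *      local_timer_register(&mytimer_lt_ops);
 */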

static void __cpuinit percpu_timer_setup(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->cpumask = cpumask_of(cpu);

        if (!lt_ops || lt_ops->setup(evt))
                broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        if (lt_ops)
                lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
                raw_spin_lock(&stop_lock);
                printk(KERN_CRIT "CPU%u: stopping\n", cpu);
                dump_stack();
                raw_spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
        handle_IPI(ipinr, regs);
}
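
/*
 * Platforms using CONFIG_MULTI_IRQ_HANDLER do not come through do_IPI()
 * above; their interrupt controller entry code (e.g. a GIC-style
 * handle_irq hook) calls handle_IPI() directly for software generated
 * interrupt numbers 0-15, roughly:
 *
 *      if (irqnr < 16)
 *              handle_IPI(irqnr, regs);
 */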

void handle_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if (ipinr < NR_IPI)
                __inc_irq_stat(cpu, ipi_irqs[ipinr]);

        switch (ipinr) {
        case IPI_WAKEUP:
                break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
                tick_receive_broadcast();
                irq_exit();
                break;
#endif

        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                irq_enter();
                generic_smp_call_function_interrupt();
                irq_exit();
                break;

        case IPI_CALL_FUNC_SINGLE:
                irq_enter();
                generic_smp_call_function_single_interrupt();
                irq_exit();
                break;

        case IPI_CPU_STOP:
                irq_enter();
                ipi_cpu_stop(cpu);
                irq_exit();
                break;

        default:
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
                break;
        }
        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
        unsigned long timeout;
        struct cpumask mask;

        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
        if (!cpumask_empty(&mask))
                smp_cross_call(&mask, IPI_CPU_STOP);

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
                                        unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        int cpu = freq->cpu;

        if (freq->flags & CPUFREQ_CONST_LOOPS)
                return NOTIFY_OK;

        if (!per_cpu(l_p_j_ref, cpu)) {
                per_cpu(l_p_j_ref, cpu) =
                        per_cpu(cpu_data, cpu).loops_per_jiffy;
                per_cpu(l_p_j_ref_freq, cpu) = freq->old;
                if (!global_l_p_j_ref) {
                        global_l_p_j_ref = loops_per_jiffy;
                        global_l_p_j_ref_freq = freq->old;
                }
        }

        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
                loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
                                                global_l_p_j_ref_freq,
                                                freq->new);
                per_cpu(cpu_data, cpu).loops_per_jiffy =
                        cpufreq_scale(per_cpu(l_p_j_ref, cpu),
                                        per_cpu(l_p_j_ref_freq, cpu),
                                        freq->new);
        }
        return NOTIFY_OK;
}
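
/*
 * cpufreq_scale(ref, ref_freq, new_freq) is effectively
 * ref * new_freq / ref_freq, so loops_per_jiffy tracks the CPU clock:
 * e.g. an l_p_j_ref of 2000000 captured at 500000 kHz becomes 4000000
 * once the CPU runs at 1000000 kHz.
 */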

static struct notifier_block cpufreq_notifier = {
        .notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
        return cpufreq_register_notifier(&cpufreq_notifier,
                                                CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif
