linux/kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

/* This protects CPUs going up and down... */
static DEFINE_MUTEX(cpu_add_remove_lock);
static DEFINE_MUTEX(cpu_bitmask_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
static struct task_struct *recursive;
static int recursive_depth;
void lock_cpu_hotplug(void)
{
        struct task_struct *tsk = current;

        if (tsk == recursive) {
                static int warnings = 10;
                if (warnings) {
                        printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
                        WARN_ON(1);
                        warnings--;
                }
                recursive_depth++;
                return;
        }
        mutex_lock(&cpu_bitmask_lock);
        recursive = tsk;
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
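
/*
 * Illustrative usage only (the loop body is hypothetical, not part of
 * this file): code that walks the online map and must not race with
 * hotplug brackets the region:
 *
 *	lock_cpu_hotplug();
 *	for_each_online_cpu(cpu)
 *		touch_per_cpu_state(cpu);
 *	unlock_cpu_hotplug();
 */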

void unlock_cpu_hotplug(void)
{
        WARN_ON(recursive != current);
        if (recursive_depth) {
                recursive_depth--;
                return;
        }
        recursive = NULL;
        mutex_unlock(&cpu_bitmask_lock);
}
EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);

#endif  /* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __cpuinit register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        mutex_lock(&cpu_add_remove_lock);
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        mutex_unlock(&cpu_add_remove_lock);
        return ret;
}
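
/*
 * Illustrative sketch only (the callback and its names are hypothetical,
 * not defined in this file): a typical client of register_cpu_notifier()
 * looks like
 *
 *	static int __cpuinit my_cpu_callback(struct notifier_block *nfb,
 *					     unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_UP_PREPARE:
 *		case CPU_UP_PREPARE_FROZEN:
 *			... allocate per-cpu state, or return NOTIFY_BAD ...
 *			break;
 *		case CPU_DEAD:
 *		case CPU_DEAD_FROZEN:
 *			... tear that state back down ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier __cpuinitdata = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_notifier);
 */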

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
        mutex_lock(&cpu_add_remove_lock);
        raw_notifier_chain_unregister(&cpu_chain, nb);
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(unregister_cpu_notifier);
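
/*
 * Sanity check run after a CPU has been taken down: any task that has
 * accumulated CPU time and is still bound to the dead CPU at this point
 * escaped the migration performed during teardown, so warn about it.
 */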
static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %x)\n",
                                p->comm, task_pid_nr(p), cpu,
                                p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down; runs on the dying CPU itself via stop_machine. */
static int take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
                                param->hcpu);
        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /* Force idle task to run as soon as we yield: it should
           immediately notice cpu is offline and die quickly. */
        sched_idle_next();
        return 0;
}
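
/*
 * Overview of the teardown sequence implemented below: CPU_DOWN_PREPARE
 * notifiers run first (and may veto the operation), take_cpu_down() is
 * then executed on the dying CPU via __stop_machine_run(), we wait for
 * the CPU to land in its idle task, __cpu_die() reaps it, and CPU_DEAD
 * announces that it is gone.  A veto or a CPU that refuses to die is
 * rolled back with CPU_DOWN_FAILED.
 */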
/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        struct task_struct *p;
        cpumask_t old_allowed, tmp;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
        err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                        hcpu, -1, &nr_calls);
        if (err == NOTIFY_BAD) {
                nr_calls--;
                __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                          hcpu, nr_calls, NULL);
                printk(KERN_ERR "%s: attempt to take down CPU %u failed\n",
                                __FUNCTION__, cpu);
                err = -EINVAL;
                goto out_release;
        }

        /* Ensure that we are not runnable on dying cpu */
        old_allowed = current->cpus_allowed;
        tmp = CPU_MASK_ALL;
        cpu_clear(cpu, tmp);
        set_cpus_allowed(current, tmp);

        mutex_lock(&cpu_bitmask_lock);
        p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
        mutex_unlock(&cpu_bitmask_lock);

        if (IS_ERR(p) || cpu_online(cpu)) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                            hcpu) == NOTIFY_BAD)
                        BUG();

                if (IS_ERR(p)) {
                        err = PTR_ERR(p);
                        goto out_allowed;
                }
                goto out_thread;
        }

        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
                yield();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
                                    hcpu) == NOTIFY_BAD)
                BUG();

        check_for_tasks(cpu);

out_thread:
        err = kthread_stop(p);
out_allowed:
        set_cpus_allowed(current, old_allowed);
out_release:
        raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
        return err;
}

int cpu_down(unsigned int cpu)
{
        int err = 0;

        mutex_lock(&cpu_add_remove_lock);
        if (cpu_hotplug_disabled)
                err = -EBUSY;
        else
                err = _cpu_down(cpu, 0);

        mutex_unlock(&cpu_add_remove_lock);
        return err;
}
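
/*
 * Sketch of a typical caller, assuming the usual sysfs hotplug interface
 * (illustrative; that interface lives outside this file):
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online	ends up in cpu_down(1)
 *	echo 1 > /sys/devices/system/cpu/cpu1/online	ends up in cpu_up(1)
 */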

#endif /* CONFIG_HOTPLUG_CPU */
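
/*
 * Overview of the bring-up sequence implemented below: CPU_UP_PREPARE
 * notifiers run first (and may veto the operation), __cpu_up() then
 * performs the arch-specific boot of the CPU, and CPU_ONLINE announces
 * success.  On failure, CPU_UP_CANCELED is sent to exactly the notifiers
 * that already saw CPU_UP_PREPARE.
 */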
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
        ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
                                        -1, &nr_calls);
        if (ret == NOTIFY_BAD) {
                nr_calls--;
                printk(KERN_ERR "%s: attempt to bring up CPU %u failed\n",
                                __FUNCTION__, cpu);
                ret = -EINVAL;
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        mutex_lock(&cpu_bitmask_lock);
        ret = __cpu_up(cpu);
        mutex_unlock(&cpu_bitmask_lock);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Now tell everyone the CPU is online. */
        raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __raw_notifier_call_chain(&cpu_chain,
                                CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
        raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);

        return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_isset(cpu, cpu_possible_map)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390)
                printk(KERN_ERR "please check additional_cpus= boot "
                                "parameter\n");
#endif
                return -EINVAL;
        }

        mutex_lock(&cpu_add_remove_lock);
        if (cpu_hotplug_disabled)
                err = -EBUSY;
        else
                err = _cpu_up(cpu, 0);

        mutex_unlock(&cpu_add_remove_lock);
        return err;
}

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_t frozen_cpus;
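
/*
 * frozen_cpus remembers exactly which CPUs disable_nonboot_cpus() took
 * down on suspend, so that enable_nonboot_cpus() brings back only those
 * CPUs on resume and leaves administratively-offlined ones alone.
 */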

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        mutex_lock(&cpu_add_remove_lock);
        first_cpu = first_cpu(cpu_online_map);
        /* We take down all of the non-boot CPUs in one shot to avoid races
         * with userspace trying to use CPU hotplug at the same time.
         */
        cpus_clear(frozen_cpus);
        printk(KERN_INFO "Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error) {
                        cpu_set(cpu, frozen_cpus);
                        printk(KERN_INFO "CPU%d is down\n", cpu);
                } else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }
        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        mutex_unlock(&cpu_add_remove_lock);
        return error;
}

void enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        mutex_lock(&cpu_add_remove_lock);
        cpu_hotplug_disabled = 0;
        if (cpus_empty(frozen_cpus))
                goto out;

        printk(KERN_INFO "Enabling non-boot CPUs ...\n");
        for_each_cpu_mask(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk(KERN_INFO "CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }
        cpus_clear(frozen_cpus);
out:
        mutex_unlock(&cpu_add_remove_lock);
}
#endif /* CONFIG_PM_SLEEP_SMP */