/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

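/*
 * Install the platform's SMP operations, normally called once by the
 * platform setup code. A later call simply replaces the previous ops
 * after warning about the override.
 */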
void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

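/*
 * Seed this CPU's cpu_data entry from the boot CPU's data and record the
 * loops_per_jiffy value just measured by calibrate_delay().
 */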
static inline void smp_store_cpu_info(unsigned int cpu)
{
        struct sh_cpuinfo *c = cpu_data + cpu;

        memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

        c->loops_per_jiffy = loops_per_jiffy;
}

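/*
 * Early SMP bring-up: note which CPU we are booting on and let the
 * platform code prepare the secondaries. Without CPU hotplug the
 * present map is simply everything that is possible.
 */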
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu = smp_processor_id();

        init_new_context(current, &init_mm);
        current_thread_info()->cpu = cpu;
        mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
}

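/* Record the boot CPU in the logical/physical maps and mark it online. */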
void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        __cpu_number_map[0] = cpu;
        __cpu_logical_map[0] = cpu;

        set_cpu_online(cpu, true);
        set_cpu_possible(cpu, true);

        per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
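/*
 * Default hotplug "die" handler: poll the victim's cpu_state for up to
 * roughly a second (10 x 100ms) waiting for it to report CPU_DEAD.
 */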
void native_cpu_die(unsigned int cpu)
{
        unsigned int i;

        for (i = 0; i < 10; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        if (system_state == SYSTEM_RUNNING)
                                pr_info("CPU %u is now offline\n", cpu);

                        return;
                }

                msleep(100);
        }

        pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
        return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
        idle_task_exit();
        irq_ctx_exit(raw_smp_processor_id());
        mb();

        __this_cpu_write(cpu_state, CPU_DEAD);
        local_irq_disable();
}

void native_play_dead(void)
{
        play_dead_common();
}

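/*
 * Take the calling CPU out of service: ask the platform for permission,
 * clear it from the online map, migrate its IRQs away and flush its
 * cache and TLB state.
 */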
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = mp_ops->cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
#ifdef CONFIG_MMU
        local_flush_tlb_all();
#endif

        clear_tasks_mm_cpumask(cpu);

        return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
        return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}

void native_play_dead(void)
{
        BUG();
}
#endif

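/*
 * Entry point for secondary CPUs, reached via the platform's start_cpu
 * hook (see __cpu_up below): switch to init_mm, set up per-CPU traps,
 * calibrate the delay loop, then mark this CPU online and enter the
 * idle loop.
 */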
asmlinkage void start_secondary(void)
{
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = &init_mm;

        enable_mmu();
        mmgrab(mm);
        mmget(mm);
        current->active_mm = mm;
#ifdef CONFIG_MMU
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
#endif

        per_cpu_trap_init();

        preempt_disable();

        notify_cpu_starting(cpu);

        local_irq_enable();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        set_cpu_online(cpu, true);
        per_cpu(cpu_state, cpu) = CPU_ONLINE;

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

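/*
 * Boot parameter block for secondary CPUs. The structure itself is
 * defined by the low-level boot code (head.S, per the comment in
 * __cpu_up below), so the layout here must stay in sync with it.
 */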
extern struct {
        unsigned long sp;
        unsigned long bss_start;
        unsigned long bss_end;
        void *start_kernel_fn;
        void *cpu_init_fn;
        void *thread_info;
} stack_start;

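/*
 * Bring one secondary CPU up: hand its stack and entry point to the boot
 * code above, kick it via the platform's start_cpu hook, then wait up to
 * one second (HZ jiffies) for it to mark itself online.
 */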
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
        unsigned long timeout;

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_icache_range((unsigned long)&stack_start,
                           (unsigned long)&stack_start + sizeof(stack_start));
        wmb();

        mp_ops->start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
                barrier();
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}

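/* Report the combined BogoMIPS of all online CPUs once bring-up is done. */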
void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

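/*
 * IPI senders: these ultimately funnel through the platform's send_ipi
 * hook using the SMP_MSG_* message types dispatched in smp_message_recv().
 */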
void smp_send_reschedule(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
        irq_enter();
        tick_receive_broadcast();
        irq_exit();
}
#endif

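/*
 * Demultiplex an incoming IPI message and dispatch it to the matching
 * generic handler.
 */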
void smp_message_recv(unsigned int msg)
{
        switch (msg) {
        case SMP_MSG_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case SMP_MSG_RESCHEDULE:
                scheduler_ipi();
                break;
        case SMP_MSG_FUNCTION_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case SMP_MSG_TIMER:
                ipi_timer();
                break;
#endif
        default:
                printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
                       smp_processor_id(), __func__, msg);
                break;
        }
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

#ifdef CONFIG_MMU

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

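/*
 * Argument block handed to the cross-CPU TLB flush IPIs below; addr1 and
 * addr2 carry a range, a single page address, or an ASID/vaddr pair
 * depending on the caller.
 */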
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
        local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
        struct flush_tlb_data fd;

        fd.addr1 = asid;
        fd.addr2 = vaddr;

        smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
        local_flush_tlb_one(asid, vaddr);
}

#endif