linux/arch/sh/kernel/smp.c
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

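/*
 * Install the platform's SMP operations.  Only a single set of ops is
 * kept; overriding a previously registered set triggers a warning.
 */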
void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

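/*
 * Seed this CPU's cpu_data entry from the boot CPU's values and record
 * its own calibrated loops_per_jiffy.
 */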
static inline void smp_store_cpu_info(unsigned int cpu)
{
        struct sh_cpuinfo *c = cpu_data + cpu;

        memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

        c->loops_per_jiffy = loops_per_jiffy;
}

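/*
 * Prepare for secondary bringup: initialize the boot CPU's MMU context
 * and hand off to the platform's prepare_cpus() hook.
 */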
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu = smp_processor_id();

        init_new_context(current, &init_mm);
        current_thread_info()->cpu = cpu;
        mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
}

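/*
 * Set up the boot CPU: map logical CPU 0 to its physical id and mark
 * the CPU online, possible and CPU_ONLINE.
 */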
void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        __cpu_number_map[0] = cpu;
        __cpu_logical_map[0] = cpu;

        set_cpu_online(cpu, true);
        set_cpu_possible(cpu, true);

        per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
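/*
 * Wait for the dying CPU to report CPU_DEAD, polling at 100ms intervals
 * for up to one second before complaining.
 */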
void native_cpu_die(unsigned int cpu)
{
        unsigned int i;

        for (i = 0; i < 10; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        if (system_state == SYSTEM_RUNNING)
                                pr_info("CPU %u is now offline\n", cpu);

                        return;
                }

                msleep(100);
        }

        pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
        return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
        idle_task_exit();
        irq_ctx_exit(raw_smp_processor_id());
        mb();

        __this_cpu_write(cpu_state, CPU_DEAD);
        local_irq_disable();
}

void native_play_dead(void)
{
        play_dead_common();
}

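/*
 * Take the current CPU down: give the platform a chance to veto the
 * offline, then mark the CPU offline, migrate its IRQs away, flush
 * caches and TLBs, and drop it from every task's mm cpumask.
 */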
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = mp_ops->cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
#ifdef CONFIG_MMU
        local_flush_tlb_all();
#endif

        clear_tasks_mm_cpumask(cpu);

        return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
        return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}

void native_play_dead(void)
{
        BUG();
}
#endif

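/*
 * C entry point for secondary CPUs, reached via head.S.  Adopts init_mm,
 * runs per-CPU trap setup, calibrates the delay loop, marks the CPU
 * online and drops into the idle loop.
 */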
asmlinkage void start_secondary(void)
{
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = &init_mm;

        enable_mmu();
        mmgrab(mm);
        mmget(mm);
        current->active_mm = mm;
#ifdef CONFIG_MMU
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
#endif

        per_cpu_trap_init();

        preempt_disable();

        notify_cpu_starting(cpu);

        local_irq_enable();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        set_cpu_online(cpu, true);
        per_cpu(cpu_state, cpu) = CPU_ONLINE;

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

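/*
 * Boot parameters for secondary CPUs, consumed by head.S; __cpu_up()
 * fills this in before kicking the new CPU.
 */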
extern struct {
        unsigned long sp;
        unsigned long bss_start;
        unsigned long bss_end;
        void *start_kernel_fn;
        void *cpu_init_fn;
        void *thread_info;
} stack_start;

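/*
 * Bring a secondary CPU online: hand it the idle task's stack through
 * stack_start, start it via the platform's start_cpu() hook, and wait
 * up to one second (HZ jiffies) for it to mark itself online.
 */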
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
        unsigned long timeout;

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_icache_range((unsigned long)&stack_start,
                           (unsigned long)&stack_start + sizeof(stack_start));
        wmb();

        mp_ops->start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
                barrier();
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}

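/* Report how many CPUs came up and their combined BogoMIPS rating. */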
void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
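/* Deliver a clock tick broadcast by sending a timer IPI to each target CPU. */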
void tick_broadcast(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
        irq_enter();
        tick_receive_broadcast();
        irq_exit();
}
#endif

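/* Dispatch an incoming IPI message to the matching generic handler. */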
void smp_message_recv(unsigned int msg)
{
        switch (msg) {
        case SMP_MSG_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case SMP_MSG_RESCHEDULE:
                scheduler_ipi();
                break;
        case SMP_MSG_FUNCTION_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case SMP_MSG_TIMER:
                ipi_timer();
                break;
#endif
        default:
                printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
                       smp_processor_id(), __func__, msg);
                break;
        }
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

#ifdef CONFIG_MMU

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

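/*
 * Flush the TLB on every online CPU; on_each_cpu() runs the flush
 * locally as well as via IPI on the other CPUs.
 */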
void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
        local_flush_tlb_one(fd->addr1, fd->addr2);
}

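/*
 * Flush a single (ASID, vaddr) translation everywhere: IPI the other
 * CPUs, then flush locally.
 */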
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
        struct flush_tlb_data fd;

        fd.addr1 = asid;
        fd.addr2 = vaddr;

        smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
        local_flush_tlb_one(asid, vaddr);
}

#endif