linux/arch/xtensa/kernel/smp.c
/*
 * Xtensa SMP support functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 - 2013 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com>
 * Pete Delaney <piet@tensilica.com>
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/thread_info.h>

#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
#include <asm/platform.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
#  error "The S32C1I option is required for SMP."
# endif
#endif

static void system_invalidate_dcache_range(unsigned long start,
                unsigned long size);
static void system_flush_invalidate_dcache_range(unsigned long start,
                unsigned long size);

/* IPI (Inter-Processor Interrupt) */

#define IPI_IRQ 0

static irqreturn_t ipi_interrupt(int irq, void *dev_id);
static struct irqaction ipi_irqaction = {
        .handler =      ipi_interrupt,
        .flags =        IRQF_PERCPU,
        .name =         "ipi",
};

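/*
 * Map IPI_IRQ through the default irq domain and register the per-CPU
 * IPI handler for it.
 */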
void ipi_init(void)
{
        unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
        setup_irq(irq, &ipi_irqaction);
}

static inline unsigned int get_core_count(void)
{
        /* Bits 18..21 of SYSCFGID contain the core count minus 1. */
        unsigned int syscfgid = get_er(SYSCFGID);
        return ((syscfgid >> 18) & 0xf) + 1;
}

static inline int get_core_id(void)
{
        /* Bits 0...18 of SYSCFGID contain the core id */
        unsigned int core_id = get_er(SYSCFGID);
        return core_id & 0x3fff;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned i;

        for (i = 0; i < max_cpus; ++i)
                set_cpu_present(i, true);
}

void __init smp_init_cpus(void)
{
        unsigned i;
        unsigned int ncpus = get_core_count();
        unsigned int core_id = get_core_id();

        pr_info("%s: Core Count = %d\n", __func__, ncpus);
        pr_info("%s: Core Id = %d\n", __func__, core_id);

        for (i = 0; i < ncpus; ++i)
                set_cpu_possible(i, true);
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();
        BUG_ON(cpu != 0);
        cpu_asid_cache(cpu) = ASID_USER_FIRST;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);

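/*
 * Main C entry point for a secondary CPU: initialize the MMU and exception
 * handling, attach to init_mm, set up per-CPU IRQs and the local timer,
 * mark the CPU online and signal __cpu_up() before entering the idle loop.
 */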
void secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
        if (boot_secondary_processors == 0) {
                pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
                        __func__, boot_secondary_processors, cpu);
                for (;;)
                        __asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
        }

        pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
                __func__, boot_secondary_processors, cpu);
#endif
        /* Init EXCSAVE1 */

        secondary_trap_init();

        /* All kernel threads share the same mm context. */

        atomic_inc(&mm->mm_users);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        enter_lazy_tlb(mm, current);

        preempt_disable();
        trace_hardirqs_off();

        calibrate_delay();

        notify_cpu_starting(cpu);

        secondary_init_irq();
        local_timer_setup(cpu);

        set_cpu_online(cpu, true);

        local_irq_enable();

        complete(&cpu_running);

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

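/*
 * mx_cpu_start()/mx_cpu_stop() release or stall the given core by clearing
 * or setting its RunStall bit in the MPSCORE register; both are invoked on
 * CPU 0 via smp_call_function_single().
 */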
static void mx_cpu_start(void *p)
{
        unsigned cpu = (unsigned)p;
        unsigned long run_stall_mask = get_er(MPSCORE);

        set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
        pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
                        __func__, cpu, run_stall_mask, get_er(MPSCORE));
}

static void mx_cpu_stop(void *p)
{
        unsigned cpu = (unsigned)p;
        unsigned long run_stall_mask = get_er(MPSCORE);

        set_er(run_stall_mask | (1u << cpu), MPSCORE);
        pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
                        __func__, cpu, run_stall_mask, get_er(MPSCORE));
}

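/*
 * Boot/shutdown handshake state shared with the low-level startup code:
 * cpu_start_ccount is written here and polled until the starting CPU
 * clears it; cpu_start_id identifies the CPU being started or torn down.
 */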
#ifdef CONFIG_HOTPLUG_CPU
unsigned long cpu_start_id __cacheline_aligned;
#endif
unsigned long cpu_start_ccount;

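/*
 * Kick a secondary CPU: record its id, release it from RunStall on CPU 0's
 * behalf, then perform the cpu_start_ccount handshake twice against a
 * one-second deadline. Returns -EIO and stalls the CPU again if the
 * handshake does not complete.
 */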
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(1000);
        unsigned long ccount;
        int i;

#ifdef CONFIG_HOTPLUG_CPU
        cpu_start_id = cpu;
        system_flush_invalidate_dcache_range(
                        (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
#endif
        smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

        for (i = 0; i < 2; ++i) {
                do
                        ccount = get_ccount();
                while (!ccount);

                cpu_start_ccount = ccount;

                while (time_before(jiffies, timeout)) {
                        mb();
                        if (!cpu_start_ccount)
                                break;
                }

                if (cpu_start_ccount) {
                        smp_call_function_single(0, mx_cpu_stop,
                                        (void *)cpu, 1);
                        cpu_start_ccount = 0;
                        return -EIO;
                }
        }
        return 0;
}

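/*
 * Bring up one secondary CPU: pass it its initial stack via start_info,
 * start it through boot_secondary() and wait up to one second for it to
 * come online.
 */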
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret = 0;

        if (cpu_asid_cache(cpu) == 0)
                cpu_asid_cache(cpu) = ASID_USER_FIRST;

        start_info.stack = (unsigned long)task_pt_regs(idle);
        wmb();

        pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
                        __func__, cpu, idle, start_info.stack);

        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                wait_for_completion_timeout(&cpu_running,
                                msecs_to_jiffies(1000));
                if (!cpu_online(cpu))
                        ret = -EIO;
        }

        if (ret)
                pr_err("CPU %u failed to boot\n", cpu);

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        local_flush_cache_all();
        local_flush_tlb_all();
        invalidate_page_directory();

        clear_tasks_mm_cpumask(cpu);

        return 0;
}

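/* Stall the dying CPU from CPU 0 via mx_cpu_stop(). */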
static void platform_cpu_kill(unsigned int cpu)
{
        smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
}

/*
 * Called from the thread that asked for a CPU to be shut down:
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(1000);
        while (time_before(jiffies, timeout)) {
                system_invalidate_dcache_range((unsigned long)&cpu_start_id,
                                sizeof(cpu_start_id));
                if (cpu_start_id == -cpu) {
                        platform_cpu_kill(cpu);
                        return;
                }
        }
        pr_err("CPU%u: unable to kill\n", cpu);
}

void arch_cpu_idle_dead(void)
{
        cpu_die();
}
/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
        idle_task_exit();
        local_irq_disable();
        __asm__ __volatile__(
                        "       movi    a2, cpu_restart\n"
                        "       jx      a2\n");
}

#endif /* CONFIG_HOTPLUG_CPU */

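/*
 * IPI message types; each one corresponds to a bit in the per-CPU
 * MIPICAUSE register and to its own MIPISET register.
 */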
enum ipi_msg_type {
        IPI_RESCHEDULE = 0,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_MAX
};

static const struct {
        const char *short_text;
        const char *long_text;
} ipi_text[] = {
        { .short_text = "RES", .long_text = "Rescheduling interrupts" },
        { .short_text = "CAL", .long_text = "Function call interrupts" },
        { .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};

struct ipi_data {
        unsigned long ipi_count[IPI_MAX];
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);

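/*
 * Build a bitmask of the target CPUs (excluding the sender) and write it
 * to the MIPISET register for the given message type to raise the IPI.
 */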
static void send_ipi_message(const struct cpumask *callmask,
                enum ipi_msg_type msg_id)
{
        int index;
        unsigned long mask = 0;

        for_each_cpu(index, callmask)
                if (index != smp_processor_id())
                        mask |= 1 << index;

        set_er(mask, MIPISET(msg_id));
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
        struct cpumask targets;

        cpumask_copy(&targets, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &targets);
        send_ipi_message(&targets, IPI_CPU_STOP);
}

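/* IPI_CPU_STOP handler: mark this CPU offline and halt it. */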
static void ipi_cpu_stop(unsigned int cpu)
{
        set_cpu_online(cpu, false);
        machine_halt();
}

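/*
 * IPI handler: read the pending message bits from this CPU's MIPICAUSE
 * register, acknowledge and count each one, then dispatch to the
 * reschedule, function-call and stop handlers as requested.
 */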
irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
        unsigned int cpu = smp_processor_id();
        struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
        unsigned int msg;
        unsigned i;

        msg = get_er(MIPICAUSE(cpu));
        for (i = 0; i < IPI_MAX; i++)
                if (msg & (1 << i)) {
                        set_er(1 << i, MIPICAUSE(cpu));
                        ++ipi->ipi_count[i];
                }

        if (msg & (1 << IPI_RESCHEDULE))
                scheduler_ipi();
        if (msg & (1 << IPI_CALL_FUNC))
                generic_smp_call_function_interrupt();
        if (msg & (1 << IPI_CPU_STOP))
                ipi_cpu_stop(cpu);

        return IRQ_HANDLED;
}

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu;
        unsigned i;

        for (i = 0; i < IPI_MAX; ++i) {
                seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
                for_each_online_cpu(cpu)
                        seq_printf(p, " %10lu",
                                        per_cpu(ipi_data, cpu).ipi_count[i]);
                seq_printf(p, "   %s\n", ipi_text[i].long_text);
        }
}

int setup_profiling_timer(unsigned int multiplier)
{
        pr_debug("setup_profiling_timer %d\n", multiplier);
        return 0;
}

/* TLB flush functions */

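/*
 * Each global flush below packs its arguments into a struct flush_data
 * and uses on_each_cpu() to run the corresponding local_* flush on every
 * online CPU, including the caller.
 */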
struct flush_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void ipi_flush_tlb_all(void *arg)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

static void ipi_flush_tlb_mm(void *arg)
{
        local_flush_tlb_mm(arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}

static void ipi_flush_tlb_page(void *arg)
{
        struct flush_data *fd = arg;
        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct flush_data fd = {
                .vma = vma,
                .addr1 = addr,
        };
        on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}

static void ipi_flush_tlb_range(void *arg)
{
        struct flush_data *fd = arg;
        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct flush_data fd = {
                .vma = vma,
                .addr1 = start,
                .addr2 = end,
        };
        on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}

static void ipi_flush_tlb_kernel_range(void *arg)
{
        struct flush_data *fd = arg;
        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_data fd = {
                .addr1 = start,
                .addr2 = end,
        };
        on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
}

/* Cache flush functions */

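/*
 * As above for the TLB, each global cache flush broadcasts its local_*
 * counterpart to every online CPU via on_each_cpu().
 */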
static void ipi_flush_cache_all(void *arg)
{
        local_flush_cache_all();
}

void flush_cache_all(void)
{
        on_each_cpu(ipi_flush_cache_all, NULL, 1);
}

static void ipi_flush_cache_page(void *arg)
{
        struct flush_data *fd = arg;
        local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_page(struct vm_area_struct *vma,
                     unsigned long address, unsigned long pfn)
{
        struct flush_data fd = {
                .vma = vma,
                .addr1 = address,
                .addr2 = pfn,
        };
        on_each_cpu(ipi_flush_cache_page, &fd, 1);
}

static void ipi_flush_cache_range(void *arg)
{
        struct flush_data *fd = arg;
        local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct flush_data fd = {
                .vma = vma,
                .addr1 = start,
                .addr2 = end,
        };
        on_each_cpu(ipi_flush_cache_range, &fd, 1);
}

static void ipi_flush_icache_range(void *arg)
{
        struct flush_data *fd = arg;
        local_flush_icache_range(fd->addr1, fd->addr2);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct flush_data fd = {
                .addr1 = start,
                .addr2 = end,
        };
        on_each_cpu(ipi_flush_icache_range, &fd, 1);
}
EXPORT_SYMBOL(flush_icache_range);

/* ------------------------------------------------------------------------- */

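/*
 * system_*_dcache_range() run the corresponding data-cache operation on
 * every CPU; they are used above to keep cpu_start_id consistent in memory
 * during the secondary-CPU boot and shutdown handshakes.
 */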
static void ipi_invalidate_dcache_range(void *arg)
{
        struct flush_data *fd = arg;
        __invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_invalidate_dcache_range(unsigned long start,
                unsigned long size)
{
        struct flush_data fd = {
                .addr1 = start,
                .addr2 = size,
        };
        on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
}

static void ipi_flush_invalidate_dcache_range(void *arg)
{
        struct flush_data *fd = arg;
        __flush_invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_flush_invalidate_dcache_range(unsigned long start,
                unsigned long size)
{
        struct flush_data fd = {
                .addr1 = start,
                .addr2 = size,
        };
        on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
}
