linux/arch/mips/kernel/smp.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical CPU mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

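/*
 * Mark every member of cpu_sibling_setup_map that shares a core with
 * @cpu as its sibling, updating the sibling map in both directions.
 */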
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

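/*
 * Mark every member of cpu_core_setup_map that shares a physical
 * package with @cpu as a core sibling, in both directions.
 */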
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask, keeping one representative CPU per core */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	/* Each CPU's foreign map holds the representatives of all other cores */
	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

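/*
 * Send an IPI to every CPU in @mask. Where a Cluster Power Controller
 * is present, cores that have powered down (and so dropped out of
 * cpu_coherent_mask) are commanded to power back up so that they can
 * take the interrupt.
 */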
void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			if (cpus_are_siblings(cpu, smp_processor_id()))
				continue;

			core = cpu_core(&cpu_data[cpu]);

			/* Power the core back up until it becomes coherent */
			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

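/*
 * Install the percpu flow handler and the given irqaction for one IPI
 * virq. This runs during boot, so any failure here is fatal.
 */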
static void smp_ipi_init_one(unsigned int virq,
				    struct irqaction *action)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = setup_irq(virq, action);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

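/*
 * Reserve and set up the call-function and reschedule IPIs for the
 * CPUs in @mask, locating the IPI IRQ domain via the DT where possible.
 */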
int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have only a partial DT setup, so if we found an
	 * irq node but no IPI domain within it, fall back to searching
	 * for an IPI domain that is not described in the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * We only have a problem if we're actually using multiple CPUs, so
	 * fail loudly if that is the case. Otherwise simply return, skipping
	 * IPI setup, if we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, &irq_call);
			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
		}
	} else {
		smp_ipi_init_one(call_virq, &irq_call);
		smp_ipi_init_one(sched_virq, &irq_resched);
	}

	return 0;
}

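/*
 * Tear down the call-function and reschedule IPIs previously allocated
 * for the CPUs in @mask.
 */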
int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have only a partial DT setup, so if we found an
	 * irq node but no IPI domain within it, fall back to searching
	 * for an IPI domain that is not described in the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			remove_irq(call_virq + cpu, &irq_call);
			remove_irq(sched_virq + cpu, &irq_resched);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);
	return 0;
}
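/*
 * Allocate IPIs for all possible CPUs at early boot and cache the irq
 * descriptors used on the IPI send fast path.
 */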
static int __init mips_smp_ipi_init(void)
{
	if (num_possible_cpus() == 1)
		return 0;

	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}

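/*
 * Bring a secondary CPU online: ask the platform code to boot it, wait
 * for it to check in, synchronise the cycle counters with it, then wait
 * until it has marked itself online.
 */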
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following TLB flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and TLB
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-cpu interrupts have to be sent.
 * Another case where inter-cpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

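/* Arguments marshalled through the single info pointer of a flush IPI */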
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable, otherwise we must invalidate
			 * the ASID without making it appear to has_valid_asid()
			 * as if the mm has been completely unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = !exec;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so we
			 * must invalidate the ASID without making it appear to
			 * has_valid_asid() as if the mm has been completely
			 * unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 1;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);

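/*
 * Kick each CPU in @mask to receive a broadcast tick. The per-cpu count
 * ensures at most one async csd is in flight per CPU at any time.
 */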
void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();
	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
	call_single_data_t *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */